// SPDX-License-Identifier: GPL-2.0

#include <linux/frame.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
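
/*
 * Rate, as a power of two in TSC cycles, at which the emulated
 * VMX-preemption timer counts down; advertised to L1 in the low bits
 * of MSR_IA32_VMX_MISC.
 */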
#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
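
/*
 * A set bit in these bitmaps makes the corresponding VMREAD/VMWRITE
 * trap to L0; cleared bits are handled in hardware via the shadow VMCS.
 */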
static u16 shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x) x,
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static u16 shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x) x,
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		u16 field = shadow_read_only_fields[i];

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1] != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
#ifdef CONFIG_X86_64
		if (field & 1)
			continue;
#endif
		if (j < i)
			shadow_read_only_fields[j] = field;
		j++;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		u16 field = shadow_read_write_fields[i];

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1] != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
#ifdef CONFIG_X86_64
		if (field & 1)
			continue;
#endif
		if (j < i)
			shadow_read_write_fields[j] = field;
		j++;
	}
	max_shadow_read_write_fields = j;
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}

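/* VMfailInvalid: set CF (and clear the other arithmetic flags), per the SDM. */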
static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
		return nested_vmx_failInvalid(vcpu);

	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed
	 */
	return kvm_skip_emulated_instruction(vcpu);
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: not to reset guest simply here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}

static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
}

static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.hv_evmcs)
		return;

	kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
	vmx->nested.hv_evmcs_vmptr = -1ull;
	vmx->nested.hv_evmcs = NULL;
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	kfree(vmx->nested.cached_shadow_vmcs12);
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_dirty(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}

static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;

	if (vmx->loaded_vmcs == vmcs)
		return;

	cpu = get_cpu();
	vmx_vcpu_put(vcpu);
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load(vcpu, cpu);
	put_cpu();

	vm_entry_controls_reset_shadow(vmx);
	vm_exit_controls_reset_shadow(vmx);
	vmx_segment_cache_clear(vmx);
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
	free_nested(vcpu);
	vcpu_put(vcpu);
}

static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
		struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else if (fault->error_code & PFERR_RSVD_MASK)
		exit_reason = EXIT_REASON_EPT_MISCONFIG;
	else
		exit_reason = EXIT_REASON_EPT_VIOLATION;

	nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_ept_mmu(vcpu,
			to_vmx(vcpu)->nested.msrs.ept_caps &
			VMX_EPT_EXECUTE_ONLY_BIT,
			nested_ept_ad_enabled(vcpu),
			nested_ept_get_cr3(vcpu));
	vcpu->arch.mmu->set_cr3           = vmx_set_cr3;
	vcpu->arch.mmu->get_cr3           = nested_ept_get_cr3;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}

/*
 * KVM wants to inject page-faults which it got to the guest. This function
 * checks whether in a nested guest, we need to inject them to L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~(DR6_FIXED_1 | DR6_BT);
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}

static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
		struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
		!to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

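/* A valid "page address" is page-aligned and within the guest's physical-address width. */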
static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}

static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
	    !page_address_valid(vcpu, vmcs12->io_bitmap_b))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
		return -EINVAL;

	return 0;
}

/*
 * Check if MSR is intercepted for L01 MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}

/*
 * If a msr is allowed by L0, we should check whether it is allowed by L1.
 * The corresponding bit will be cleared unless both of L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
						 unsigned long *msr_bitmap_nested,
						 u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
			/* read-low */
			__clear_bit(msr, msr_bitmap_nested + 0x000 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
			/* write-low */
			__clear_bit(msr, msr_bitmap_nested + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
			/* read-high */
			__clear_bit(msr, msr_bitmap_nested + 0x400 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
			/* write-high */
			__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
	}
}

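/*
 * Set both the read (offset 0x000) and write (offset 0x800) intercept
 * bits for the x2APIC MSR range 0x800-0x8ff; callers then selectively
 * clear the bits they want to pass through.
 */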
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
	int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0;
		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
	}
}

/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map->hva;

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively disable them below.
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff, it just lets the processor take the value
			 * from the virtual-APIC page; take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_FS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_GS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);

	/*
	 * Checking the L0->L1 bitmap is trying to verify two things:
	 *
	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or L2s have actually used the MSR. This avoids
	 *    unnecessarily merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_SPEC_CTRL,
					MSR_TYPE_R | MSR_TYPE_W);

	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_PRED_CMD,
					MSR_TYPE_W);

	kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);

	return true;
}

static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct kvm_host_map map;
	struct vmcs12 *shadow;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	shadow = get_shadow_vmcs12(vcpu);

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
		return;

	memcpy(shadow, map.hva, VMCS12_SIZE);
	kvm_vcpu_unmap(vcpu, &map, false);
}

static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}

static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    !page_address_valid(vcpu, vmcs12->apic_access_addr))
		return -EINVAL;
	else
		return 0;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (nested_cpu_has_vid(vmcs12) &&
	   !nested_exit_on_intr(vcpu))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has been already checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	   (!nested_cpu_has_vid(vmcs12) ||
	    !nested_exit_intr_ack_set(vcpu) ||
	    (vmcs12->posted_intr_nv & 0xff00) ||
	    (vmcs12->posted_intr_desc_addr & 0x3f) ||
	    (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return -EINVAL;

	return 0;
}

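/*
 * The VM-entry/VM-exit MSR-load/store areas must be 16-byte aligned
 * and must fit entirely within the guest's physical-address width.
 */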
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	int maxphyaddr;

	if (count == 0)
		return 0;
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
					vmcs12->vm_exit_msr_load_addr) ||
	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
					vmcs12->vm_exit_msr_store_addr))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						      struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
					vmcs12->vm_entry_msr_load_addr))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (!nested_cpu_has_ept(vmcs12) ||
	    !page_address_valid(vcpu, vmcs12->pml_address))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	    !nested_cpu_has_ept(vmcs12))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	    !nested_cpu_has_ept(vmcs12))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
	    !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
		return -EINVAL;

	return 0;
}

static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
		return -EINVAL;
	if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
	    e->index == MSR_IA32_UCODE_REV)
		return -EINVAL;
	if (e->reserved != 0)
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (e->index == MSR_FS_BASE ||
	    e->index == MSR_GS_BASE ||
	    e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

/*
 * Load guest's/host's msr at nested entry/exit.
 * return 0 for success, entry index for failure.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	struct msr_data msr;

	msr.host_initiated = false;
	for (i = 0; i < count; i++) {
		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		msr.index = e.index;
		msr.data = e.value;
		if (kvm_set_msr(vcpu, &msr)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	return i + 1;
}

static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;

	for (i = 0; i < count; i++) {
		struct msr_data msr_info;
		if (kvm_vcpu_read_guest(vcpu,
					gpa + i * sizeof(e),
					&e, 2 * sizeof(u32))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			return -EINVAL;
		}
		if (nested_vmx_store_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			return -EINVAL;
		}
		msr_info.host_initiated = false;
		msr_info.index = e.index;
		if (kvm_get_msr(vcpu, &msr_info)) {
			pr_debug_ratelimited(
				"%s cannot read MSR (%u, 0x%x)\n",
				__func__, i, e.index);
			return -EINVAL;
		}
		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
					     offsetof(struct vmx_msr_entry, value),
					 &msr_info.data, sizeof(msr_info.data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, msr_info.data);
			return -EINVAL;
		}
	}
	return 0;
}

static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long invalid_mask;

	invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	return (val & invalid_mask) == 0;
}

/*
 * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
 * emulating VM entry into a guest with EPT enabled.
 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
 * is assigned to entry_failure_code on failure.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
			       u32 *entry_failure_code)
{
	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
		if (!nested_cr3_valid(vcpu, cr3)) {
			*entry_failure_code = ENTRY_FAIL_DEFAULT;
			return 1;
		}

		/*
		 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
		 * must not be dereferenced.
		 */
		if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
		    !nested_ept) {
			if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
				*entry_failure_code = ENTRY_FAIL_PDPTE;
				return 1;
			}
		}
	}

	if (!nested_ept)
		kvm_mmu_new_cr3(vcpu, cr3, false);

	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);

	kvm_init_mmu(vcpu, false);

	return 0;
}

/*
 * Returns if KVM is able to config CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated
 * by L1.
 *
 * If L1 uses EPT, then TLB entries are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return nested_cpu_has_ept(vmcs12) ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

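/* L2 runs with vpid02 when one was allocated; otherwise it shares vmx->vpid with L1. */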
static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

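/* True iff every bit of @subset covered by @mask is also set in @superset. */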
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}

static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}

static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.msrs.procbased_ctls_low;
		highp = &vmx->nested.msrs.procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.msrs.exit_ctls_low;
		highp = &vmx->nested.msrs.exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.msrs.entry_ctls_low;
		highp = &vmx->nested.msrs.entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.msrs.secondary_ctls_low;
		highp = &vmx->nested.msrs.secondary_ctls_high;
		break;
	default:
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}

static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				   vmx->nested.msrs.misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	/*
	 * If L1 has read-only VM-exit information fields, use the
	 * less permissive vmx_vmwrite_bitmap to specify write
	 * permissions for the shadow VMCS.
	 */
	if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));

	return 0;
}

static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap;

	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
					   vmx->nested.msrs.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}

static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 *msr;

	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		msr = &vmx->nested.msrs.cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		msr = &vmx->nested.msrs.cr4_fixed0;
		break;
	default:
		BUG();
	}

	/*
	 * 1 bits (which indicates bits which "must-be-1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*msr = data;
	return 0;
}

/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}

/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}

/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case
 * they have been modified by the L1 guest. Note that the "read-only"
 * VM-exit information fields are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS."
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	const u16 *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	int i, q;
	unsigned long field;
	u64 field_value;
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

	preempt_disable();

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			field_value = __vmcs_readl(field);
			vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
		}
		/*
		 * Skip the VM-exit information fields if they are read-only.
		 */
		if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
			break;
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);

	preempt_enable();
}

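/*
 * Propagate the cached vmcs12 field values into the shadow VMCS so that
 * L1's VMREADs can be satisfied in hardware without exiting.
 */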
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
	const u16 *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	int i, q;
	unsigned long field;
	u64 field_value = 0;
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
			__vmcs_writel(field, field_value);
		}
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);
}

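/*
 * Pull fields from the Hyper-V enlightened VMCS into the cached vmcs12,
 * skipping field groups that the hv_clean_fields mask marks as unchanged.
 */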
static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
	vmcs12->tpr_threshold = evmcs->tpr_threshold;
	vmcs12->guest_rip = evmcs->guest_rip;

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
		vmcs12->guest_rsp = evmcs->guest_rsp;
		vmcs12->guest_rflags = evmcs->guest_rflags;
		vmcs12->guest_interruptibility_info =
			evmcs->guest_interruptibility_info;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->cpu_based_vm_exec_control =
			evmcs->cpu_based_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->exception_bitmap = evmcs->exception_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
		vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
		vmcs12->vm_entry_intr_info_field =
			evmcs->vm_entry_intr_info_field;
		vmcs12->vm_entry_exception_error_code =
			evmcs->vm_entry_exception_error_code;
		vmcs12->vm_entry_instruction_len =
			evmcs->vm_entry_instruction_len;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
		vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
		vmcs12->host_cr0 = evmcs->host_cr0;
		vmcs12->host_cr3 = evmcs->host_cr3;
		vmcs12->host_cr4 = evmcs->host_cr4;
		vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
		vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
		vmcs12->host_rip = evmcs->host_rip;
		vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
		vmcs12->host_es_selector = evmcs->host_es_selector;
		vmcs12->host_cs_selector = evmcs->host_cs_selector;
		vmcs12->host_ss_selector = evmcs->host_ss_selector;
		vmcs12->host_ds_selector = evmcs->host_ds_selector;
		vmcs12->host_fs_selector = evmcs->host_fs_selector;
		vmcs12->host_gs_selector = evmcs->host_gs_selector;
		vmcs12->host_tr_selector = evmcs->host_tr_selector;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->pin_based_vm_exec_control =
			evmcs->pin_based_vm_exec_control;
		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
		vmcs12->secondary_vm_exec_control =
			evmcs->secondary_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
		vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
		vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
		vmcs12->msr_bitmap = evmcs->msr_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
		vmcs12->guest_es_base = evmcs->guest_es_base;
		vmcs12->guest_cs_base = evmcs->guest_cs_base;
		vmcs12->guest_ss_base = evmcs->guest_ss_base;
		vmcs12->guest_ds_base = evmcs->guest_ds_base;
		vmcs12->guest_fs_base = evmcs->guest_fs_base;
		vmcs12->guest_gs_base = evmcs->guest_gs_base;
		vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
		vmcs12->guest_tr_base = evmcs->guest_tr_base;
		vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
		vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
		vmcs12->guest_es_limit = evmcs->guest_es_limit;
		vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
		vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
		vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
		vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
		vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
		vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
		vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
		vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
		vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
		vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
		vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
		vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
		vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
		vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
		vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
		vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
		vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
		vmcs12->guest_es_selector = evmcs->guest_es_selector;
		vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
		vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
		vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
		vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
		vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
		vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
		vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
		vmcs12->tsc_offset = evmcs->tsc_offset;
		vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
		vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
		vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
		vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
		vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
		vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
		vmcs12->guest_cr0 = evmcs->guest_cr0;
		vmcs12->guest_cr3 = evmcs->guest_cr3;
		vmcs12->guest_cr4 = evmcs->guest_cr4;
		vmcs12->guest_dr7 = evmcs->guest_dr7;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
		vmcs12->host_fs_base = evmcs->host_fs_base;
		vmcs12->host_gs_base = evmcs->host_gs_base;
		vmcs12->host_tr_base = evmcs->host_tr_base;
		vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
		vmcs12->host_idtr_base = evmcs->host_idtr_base;
		vmcs12->host_rsp = evmcs->host_rsp;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
		vmcs12->ept_pointer = evmcs->ept_pointer;
		vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
		vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
		vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
		vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
		vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
		vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
		vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
		vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
		vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
		vmcs12->guest_pending_dbg_exceptions =
			evmcs->guest_pending_dbg_exceptions;
		vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
		vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
		vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
		vmcs12->guest_activity_state = evmcs->guest_activity_state;
		vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
	}

	/*
	 * Not used in KVM:
	 *
	 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
	 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
	 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
	 * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
	 * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
	 * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
	 * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
	 * vmcs12->page_fault_error_code_mask =
	 *		evmcs->page_fault_error_code_mask;
	 * vmcs12->page_fault_error_code_match =
	 *		evmcs->page_fault_error_code_match;
	 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
	 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
	 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
	 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
	 *
	 * Read only fields:
	 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
	 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
	 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
	 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
	 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
	 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
	 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
	 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
	 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
	 * vmcs12->exit_qualification = evmcs->exit_qualification;
	 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
	 *
	 * Not present in struct vmcs12:
	 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
	 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
	 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
	 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
	 */

	return 0;
}

static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/*
	 * Should not be changed by KVM:
	 *
	 * evmcs->host_es_selector = vmcs12->host_es_selector;
	 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
	 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
	 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
	 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
	 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
	 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
	 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
	 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
	 * evmcs->host_cr0 = vmcs12->host_cr0;
	 * evmcs->host_cr3 = vmcs12->host_cr3;
	 * evmcs->host_cr4 = vmcs12->host_cr4;
	 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
	 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
	 * evmcs->host_rip = vmcs12->host_rip;
	 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
	 * evmcs->host_fs_base = vmcs12->host_fs_base;
	 * evmcs->host_gs_base = vmcs12->host_gs_base;
	 * evmcs->host_tr_base = vmcs12->host_tr_base;
	 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
	 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
	 * evmcs->host_rsp = vmcs12->host_rsp;
	 * sync_vmcs12() doesn't read these:
	 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
	 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
	 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
	 * evmcs->ept_pointer = vmcs12->ept_pointer;
	 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
	 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
	 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
	 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
	 * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
	 * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
	 * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
	 * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
	 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
	 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
	 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
	 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
	 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
	 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
	 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
	 * evmcs->page_fault_error_code_mask =
	 *		vmcs12->page_fault_error_code_mask;
	 * evmcs->page_fault_error_code_match =
	 *		vmcs12->page_fault_error_code_match;
	 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
	 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
	 * evmcs->tsc_offset = vmcs12->tsc_offset;
	 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
	 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
	 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
	 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
	 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
	 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
	 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
	 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
	 *
	 * Not present in struct vmcs12:
	 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
	 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
	 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
	 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
	 */

	evmcs->guest_es_selector = vmcs12->guest_es_selector;
	evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
	evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
	evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
	evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
	evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
	evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
	evmcs->guest_tr_selector = vmcs12->guest_tr_selector;

	evmcs->guest_es_limit = vmcs12->guest_es_limit;
	evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
	evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
	evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
	evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
	evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
	evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
	evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
	evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
	evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;

	evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
	evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
	evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
	evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
	evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
	evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
	evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
	evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;

	evmcs->guest_es_base = vmcs12->guest_es_base;
	evmcs->guest_cs_base = vmcs12->guest_cs_base;
	evmcs->guest_ss_base = vmcs12->guest_ss_base;
	evmcs->guest_ds_base = vmcs12->guest_ds_base;
	evmcs->guest_fs_base = vmcs12->guest_fs_base;
	evmcs->guest_gs_base = vmcs12->guest_gs_base;
	evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
	evmcs->guest_tr_base = vmcs12->guest_tr_base;
	evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
	evmcs->guest_idtr_base = vmcs12->guest_idtr_base;

	evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
	evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;

	evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
	evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
	evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
	evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;

	evmcs->guest_pending_dbg_exceptions =
		vmcs12->guest_pending_dbg_exceptions;
	evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
	evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;

	evmcs->guest_activity_state = vmcs12->guest_activity_state;
	evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;

	evmcs->guest_cr0 = vmcs12->guest_cr0;
	evmcs->guest_cr3 = vmcs12->guest_cr3;
	evmcs->guest_cr4 = vmcs12->guest_cr4;
	evmcs->guest_dr7 = vmcs12->guest_dr7;

	evmcs->guest_physical_address = vmcs12->guest_physical_address;

	evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
	evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
	evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
	evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
	evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
	evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
	evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
	evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;

	evmcs->exit_qualification = vmcs12->exit_qualification;

	evmcs->guest_linear_address = vmcs12->guest_linear_address;
	evmcs->guest_rsp = vmcs12->guest_rsp;
	evmcs->guest_rflags = vmcs12->guest_rflags;

	evmcs->guest_interruptibility_info =
		vmcs12->guest_interruptibility_info;
	evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
	evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
	evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
	evmcs->vm_entry_exception_error_code =
		vmcs12->vm_entry_exception_error_code;
	evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;

	evmcs->guest_rip = vmcs12->guest_rip;

	evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;

	return 0;
}

/*
 * This is an equivalent of the nested hypervisor executing the vmptrld
 * instruction.
 */
static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
						 bool from_launch)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct hv_vp_assist_page assist_page;

	if (likely(!vmx->nested.enlightened_vmcs_enabled))
		return 1;

	if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
		return 1;

	if (unlikely(!assist_page.enlighten_vmentry))
		return 1;

	if (unlikely(assist_page.current_nested_vmcs !=
		     vmx->nested.hv_evmcs_vmptr)) {

		if (!vmx->nested.hv_evmcs)
			vmx->nested.current_vmptr = -1ull;

		nested_release_evmcs(vcpu);

		if (kvm_vcpu_map(vcpu, gpa_to_gfn(assist_page.current_nested_vmcs),
				 &vmx->nested.hv_evmcs_map))
			return 0;

		vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;

		/*
		 * Currently, KVM only supports eVMCS version 1
		 * (== KVM_EVMCS_VERSION) and thus we expect guest to set this
		 * value to first u32 field of eVMCS which should specify eVMCS
		 * VersionNumber.
		 *
		 * Guest should be aware of supported eVMCS versions by host by
		 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is
		 * expected to set this CPUID leaf according to the value
		 * returned in vmcs_version from nested_enable_evmcs().
		 *
		 * However, it turns out that Microsoft Hyper-V fails to comply
		 * to their own invented interface: When Hyper-V use eVMCS, it
		 * just sets first u32 field of eVMCS to revision_id specified
		 * in MSR_IA32_VMX_BASIC. Instead of used eVMCS version number
		 * which is one of the supported versions specified in
		 * CPUID.0x4000000A.EAX[0:15].
		 *
		 * To overcome Hyper-V bug, we accept here either a supported
		 * eVMCS version or VMCS12 revision_id as valid values for first
		 * u32 field of eVMCS.
		 */
		if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
		    (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
			nested_release_evmcs(vcpu);
			return 0;
		}

		vmx->nested.dirty_vmcs12 = true;
		/*
		 * As we keep L2 state for one guest only 'hv_clean_fields' mask
		 * can't be used when we switch between them. Reset it here for
		 * simplicity.
		 */
		vmx->nested.hv_evmcs->hv_clean_fields &=
			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
		vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;

		/*
		 * Unlike normal vmcs12, enlightened vmcs12 is not fully
		 * reloaded from guest's memory (read only fields, fields not
		 * present in struct hv_enlightened_vmcs, ...). Make sure there
		 * are no leftovers.
		 */
		if (from_launch) {
			struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
			memset(vmcs12, 0, sizeof(*vmcs12));
			vmcs12->hdr.revision_id = VMCS12_REVISION;
		}

	}
	return 1;
}

void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * hv_evmcs may end up being not mapped after migration (when
	 * L2 was running), map it here to make sure vmcs12 changes are
	 * properly reflected.
	 */
	if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
		nested_vmx_handle_enlightened_vmptrld(vcpu, false);

	if (vmx->nested.hv_evmcs) {
		copy_vmcs12_to_enlightened(vmx);
		/* All fields are clean */
		vmx->nested.hv_evmcs->hv_clean_fields |=
			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
	} else {
		copy_vmcs12_to_shadow(vmx);
	}

	vmx->nested.need_vmcs12_sync = false;
}

static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
	struct vcpu_vmx *vmx =
		container_of(timer, struct vcpu_vmx, nested.preemption_timer);

	vmx->nested.preemption_timer_expired = true;
	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
	kvm_vcpu_kick(&vmx->vcpu);

	return HRTIMER_NORESTART;
}

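/*
 * The vmcs12 timer value counts in units of
 * 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE TSC cycles; convert it to
 * nanoseconds against the guest's virtual TSC frequency before arming
 * the hrtimer.
 */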
static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
{
	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * A timer value of zero is architecturally guaranteed to cause
	 * a VMExit prior to executing any instructions in the guest.
	 */
	if (preemption_timeout == 0) {
		vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
		return;
	}

	if (vcpu->arch.virtual_tsc_khz == 0)
		return;

	/* Convert timer units (2^rate TSC cycles) to nanoseconds. */
	preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
	preemption_timeout *= 1000000;
	do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
	hrtimer_start(&vmx->nested.preemption_timer,
		      ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
}
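/*
 * Compute the IA32_EFER value L2 will run with: vmcs12's value when the
 * entry loads IA32_EFER, otherwise L1's EFER with LMA/LME forced to match
 * the "IA-32e mode guest" VM-entry control.
 */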
static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
		return vmcs12->guest_ia32_efer;
	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
		return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
	else
		return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
}
static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
{
	/*
	 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
	 * according to L0's settings (vmcs12 is irrelevant here). Host
	 * fields that come from L0 and are not constant, e.g. HOST_CR3,
	 * will be set as needed prior to VMLAUNCH/VMRESUME.
	 */
	if (vmx->nested.vmcs02_initialized)
		return;
	vmx->nested.vmcs02_initialized = true;

	/*
	 * We don't care what the EPTP value is; we just need to guarantee
	 * it's valid so we don't get a false positive when doing early
	 * consistency checks.
	 */
	if (enable_ept && nested_early_check)
		vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));

	/* All VMFUNCs are currently emulated through L0 vmexits. */
	if (cpu_has_vmx_vmfunc())
		vmcs_write64(VM_FUNCTION_CONTROL, 0);

	if (cpu_has_vmx_posted_intr())
		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);

	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));

	if (enable_pml)
		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));

	/*
	 * Set the MSR load/store lists to match L0's settings. Only the
	 * addresses are constant (for vmcs02), the counts can change based
	 * on L2's behavior, e.g. switching to/from long mode.
	 */
	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));

	vmx_set_constant_host_state(vmx);
}
static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx,
				      struct vmcs12 *vmcs12)
{
	prepare_vmcs02_constant_state(vmx);

	vmcs_write64(VMCS_LINK_POINTER, -1ull);

	if (enable_vpid) {
		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
		else
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
	}
}
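/*
 * Set up the vmcs02 control fields that must be valid before the early
 * (hardware-assisted) consistency check: pin-based, primary and secondary
 * execution controls, entry/exit controls and event injection, merging L0's
 * requirements with L1's requests from vmcs12.
 */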
static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	u32 exec_control, vmcs12_exec_ctrl;
	u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);

	if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
		prepare_vmcs02_early_full(vmx, vmcs12);

	/*
	 * PIN CONTROLS
	 */
	exec_control = vmcs12->pin_based_vm_exec_control;

	/* Preemption timer setting is computed directly in vmx_vcpu_run. */
	exec_control |= vmcs_config.pin_based_exec_ctrl;
	exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
	vmx->loaded_vmcs->hv_timer_armed = false;

	/* Posted interrupts setting is only taken from vmcs12. */
	if (nested_cpu_has_posted_intr(vmcs12)) {
		vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
		vmx->nested.pi_pending = false;
	} else {
		exec_control &= ~PIN_BASED_POSTED_INTR;
	}
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);

	/*
	 * EXEC CONTROLS
	 */
	exec_control = vmx_exec_control(vmx); /* L0's desires */
	exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
	exec_control &= ~CPU_BASED_TPR_SHADOW;
	exec_control |= vmcs12->cpu_based_vm_exec_control;

	/*
	 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
	 * nested_get_vmcs12_pages can't fix it up, the illegal value
	 * will result in a VM entry failure.
	 */
	if (exec_control & CPU_BASED_TPR_SHADOW) {
		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
		vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
	} else {
#ifdef CONFIG_X86_64
		exec_control |= CPU_BASED_CR8_LOAD_EXITING |
				CPU_BASED_CR8_STORE_EXITING;
#endif
	}

	/*
	 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
	 * for I/O port accesses.
	 */
	exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
	exec_control |= CPU_BASED_UNCOND_IO_EXITING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);

	/*
	 * SECONDARY EXEC CONTROLS
	 */
	if (cpu_has_secondary_exec_ctrls()) {
		exec_control = vmx->secondary_exec_control;

		/* Take the following fields only from vmcs12 */
		exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
				  SECONDARY_EXEC_ENABLE_INVPCID |
				  SECONDARY_EXEC_RDTSCP |
				  SECONDARY_EXEC_XSAVES |
				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
				  SECONDARY_EXEC_APIC_REGISTER_VIRT |
				  SECONDARY_EXEC_ENABLE_VMFUNC);
		if (nested_cpu_has(vmcs12,
				   CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
			vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
				~SECONDARY_EXEC_ENABLE_PML;
			exec_control |= vmcs12_exec_ctrl;
		}

		/* VMCS shadowing for L2 is emulated for now */
		exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;

		if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
			vmcs_write16(GUEST_INTR_STATUS,
				     vmcs12->guest_intr_status);

		/*
		 * Write an illegal value to APIC_ACCESS_ADDR. Later,
		 * nested_get_vmcs12_pages will either fix it up or
		 * remove the VM execution control.
		 */
		if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
			vmcs_write64(APIC_ACCESS_ADDR, -1ull);

		if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
			vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);

		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
	}

	/*
	 * ENTRY CONTROLS
	 *
	 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
	 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
	 * on the related bits (if supported by the CPU) in the hope that
	 * we can avoid VMWrites during vmx_set_efer().
	 */
	exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
			~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
	if (cpu_has_load_ia32_efer()) {
		if (guest_efer & EFER_LMA)
			exec_control |= VM_ENTRY_IA32E_MODE;
		if (guest_efer != host_efer)
			exec_control |= VM_ENTRY_LOAD_IA32_EFER;
	}
	vm_entry_controls_init(vmx, exec_control);

	/*
	 * EXIT CONTROLS
	 *
	 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
	 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
	 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
	 */
	exec_control = vmx_vmexit_ctrl();
	if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
		exec_control |= VM_EXIT_LOAD_IA32_EFER;
	vm_exit_controls_init(vmx, exec_control);

	/*
	 * Conceptually we want to copy the PML address and index from
	 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
	 * since we always flush the log on each vmexit and never change
	 * the PML address (once set), this happens to be equivalent to
	 * simply resetting the index in vmcs02.
	 */
	if (enable_pml)
		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);

	/*
	 * Interrupt/Exception Fields
	 */
	if (vmx->nested.nested_run_pending) {
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			     vmcs12->vm_entry_intr_info_field);
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
			     vmcs12->vm_entry_exception_error_code);
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmcs12->vm_entry_instruction_len);
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
			     vmcs12->guest_interruptibility_info);
		vmx->loaded_vmcs->nmi_known_unmasked =
			!(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
	} else {
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
	}
}
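/*
 * Copy the bulk of the guest-state fields from vmcs12 into vmcs02. With an
 * enlightened VMCS, whole groups of fields can be skipped when the
 * corresponding hv_clean_fields bit indicates L1 has not touched them since
 * the previous run.
 */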
static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;

	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
			   HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
		vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
		vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
		vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
		vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
		vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
		vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
		vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
		vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
		vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
		vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
		vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
		vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
		vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
		vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
		vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
		vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
		vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
		vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
		vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
		vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
		vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
		vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
		vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
		vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
		vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
		vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
		vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
		vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
		vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
		vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
		vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
		vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
		vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
		vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
	}

	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
			   HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
		vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
		vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
			    vmcs12->guest_pending_dbg_exceptions);
		vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
		vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);

		/*
		 * L1 may access the L2's PDPTR, so save them to construct
		 * vmcs12
		 */
		if (enable_ept) {
			vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
			vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
			vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
			vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
		}
	}

	if (nested_cpu_has_xsaves(vmcs12))
		vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);

	/*
	 * Whether page-faults are trapped is determined by a combination of
	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
	 * If enable_ept, L0 doesn't care about page faults and we should
	 * set all of these to L1's desires. However, if !enable_ept, L0 does
	 * care about (at least some) page faults, and because it is not easy
	 * (if at all possible?) to merge L0 and L1's desires, we simply ask
	 * to exit on each and every L2 page fault. This is done by setting
	 * MASK=MATCH=0 and (see below) EB.PF=1.
	 * Note that below we don't need special code to set EB.PF beyond the
	 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
	 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
	 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
	 */
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
		     enable_ept ? vmcs12->page_fault_error_code_mask : 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
		     enable_ept ? vmcs12->page_fault_error_code_match : 0);

	if (cpu_has_vmx_apicv()) {
		vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
		vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
		vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
		vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
	}

	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

	set_cr4_guest_host_mask(vmx);

	if (kvm_mpx_supported()) {
		if (vmx->nested.nested_run_pending &&
		    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
			vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
		else
			vmcs_write64(GUEST_BNDCFGS,
				     vmx->nested.vmcs01_guest_bndcfgs);
	}
}
 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
 * guest in a way that will both be appropriate to L1's requests, and our
 * needs. In addition to modifying the active vmcs (which is vmcs02), this
 * function also has additional necessary side-effects, like setting various
 * vcpu->arch fields.
 * Returns 0 on success, 1 on failure. The invalid-state exit qualification
 * code is assigned to entry_failure_code on failure.
 */
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			  u32 *entry_failure_code)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;

	if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) {
		prepare_vmcs02_full(vmx, vmcs12);
		vmx->nested.dirty_vmcs12 = false;
	}

	/*
	 * First, the fields that are shadowed. This must be kept in sync
	 * with vmcs_shadow_fields.h.
	 */
	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
			   HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
		vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
		vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
	}

	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
		kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
	} else {
		kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
	}
	vmx_set_rflags(vcpu, vmcs12->guest_rflags);

	/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
	 * bitwise-or of what L1 wants to trap for L2, and what we want to
	 * trap. Note that CR0.TS also needs updating - we do this later.
	 */
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);

	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
		vcpu->arch.pat = vmcs12->guest_ia32_pat;
	} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
	}

	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);

	if (kvm_has_tsc_control)
		decache_tsc_multiplier(vmx);

	if (enable_vpid) {
		/*
		 * There is no direct mapping between vpid02 and vpid12: the
		 * vpid02 is per-vCPU for L0 and reused, while the value of
		 * vpid12 is changed with one invvpid during nested vmentry.
		 * The vpid12 is allocated by L1 for L2, so it will not
		 * influence the global bitmap (for vpid01 and vpid02
		 * allocation) even if a lot of nested vCPUs are spawned.
		 */
		if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
			if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
				vmx->nested.last_vpid = vmcs12->virtual_processor_id;
				__vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
			}
		} else {
			/*
			 * If L1 uses EPT, then L0 needs to execute INVEPT on
			 * EPTP02 instead of EPTP01. Therefore, delay the TLB
			 * flush until vmcs02->eptp is fully updated by
			 * KVM_REQ_LOAD_CR3. Note that this assumes
			 * KVM_REQ_TLB_FLUSH is evaluated after
			 * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
			 */
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}
	}

	if (nested_cpu_has_ept(vmcs12))
		nested_ept_init_mmu_context(vcpu);
	else if (nested_cpu_has2(vmcs12,
				 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		vmx_flush_tlb(vcpu, true);

	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
	 * bits which we consider mandatory enabled.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; it's not enough to take
	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may have
	 * more bits than L1 expected.
	 */
	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));

	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));

	vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
	/* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
	vmx_set_efer(vcpu, vcpu->arch.efer);

	/*
	 * Guest state is invalid and unrestricted guest is disabled,
	 * which means L1 attempted VMEntry to L2 with invalid state.
	 * Fail the VMEntry.
	 */
	if (vmx->emulation_required) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return 1;
	}

	/* Shadow page tables on either EPT or shadow page tables. */
	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
				entry_failure_code))
		return 1;

	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;

	kvm_rsp_write(vcpu, vmcs12->guest_rsp);
	kvm_rip_write(vcpu, vmcs12->guest_rip);
	return 0;
}
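/*
 * Mirror the CPU's VM-entry checks on the NMI controls (SDM: "Checks on VMX
 * Controls"): "virtual NMIs" requires "NMI exiting", and the NMI-window
 * control requires "virtual NMIs".
 */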
static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_nmi_exiting(vmcs12) &&
	    nested_cpu_has_virtual_nmis(vmcs12))
		return -EINVAL;

	if (!nested_cpu_has_virtual_nmis(vmcs12) &&
	    nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
		return -EINVAL;

	return 0;
}
static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int maxphyaddr = cpuid_maxphyaddr(vcpu);

	/* Check for memory type validity */
	switch (address & VMX_EPTP_MT_MASK) {
	case VMX_EPTP_MT_UC:
		if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
			return false;
		break;
	case VMX_EPTP_MT_WB:
		if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
			return false;
		break;
	default:
		return false;
	}

	/* Only a 4-level page-walk length is valid */
	if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
		return false;

	/* Reserved bits should not be set */
	if (address >> maxphyaddr || ((address >> 7) & 0x1f))
		return false;

	/* AD, if set, should be supported */
	if (address & VMX_EPTP_AD_ENABLE_BIT) {
		if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
			return false;
	}

	return true;
}
/*
 * Checks related to VM-Execution Control Fields
 */
static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
				vmx->nested.msrs.pinbased_ctls_low,
				vmx->nested.msrs.pinbased_ctls_high) ||
	    !vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
				vmx->nested.msrs.procbased_ctls_low,
				vmx->nested.msrs.procbased_ctls_high))
		return -EINVAL;

	if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
	    !vmx_control_verify(vmcs12->secondary_vm_exec_control,
				vmx->nested.msrs.secondary_ctls_low,
				vmx->nested.msrs.secondary_ctls_high))
		return -EINVAL;

	if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) ||
	    nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
	    nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
	    nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
	    nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
	    nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
	    nested_vmx_check_nmi_controls(vmcs12) ||
	    nested_vmx_check_pml_controls(vcpu, vmcs12) ||
	    nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
	    nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
	    nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
	    (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
		return -EINVAL;

	if (!nested_cpu_has_preemption_timer(vmcs12) &&
	    nested_cpu_has_save_preemption_timer(vmcs12))
		return -EINVAL;

	if (nested_cpu_has_ept(vmcs12) &&
	    !valid_ept_address(vcpu, vmcs12->ept_pointer))
		return -EINVAL;

	if (nested_cpu_has_vmfunc(vmcs12)) {
		if (vmcs12->vm_function_control &
		    ~vmx->nested.msrs.vmfunc_controls)
			return -EINVAL;

		if (nested_cpu_has_eptp_switching(vmcs12)) {
			if (!nested_cpu_has_ept(vmcs12) ||
			    !page_address_valid(vcpu, vmcs12->eptp_list_address))
				return -EINVAL;
		}
	}

	return 0;
}
/*
 * Checks related to VM-Exit Control Fields
 */
static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx_control_verify(vmcs12->vm_exit_controls,
				vmx->nested.msrs.exit_ctls_low,
				vmx->nested.msrs.exit_ctls_high) ||
	    nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
		return -EINVAL;

	return 0;
}
/*
 * Checks related to VM-Entry Control Fields
 */
static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
					  struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx_control_verify(vmcs12->vm_entry_controls,
				vmx->nested.msrs.entry_ctls_low,
				vmx->nested.msrs.entry_ctls_high))
		return -EINVAL;

	/*
	 * From the Intel SDM, volume 3:
	 * Fields relevant to VM-entry event injection must be set properly.
	 * These fields are the VM-entry interruption-information field, the
	 * VM-entry exception error code, and the VM-entry instruction length.
	 */
	if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
		u32 intr_info = vmcs12->vm_entry_intr_info_field;
		u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
		u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
		bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
		bool should_have_error_code;
		bool urg = nested_cpu_has2(vmcs12,
					   SECONDARY_EXEC_UNRESTRICTED_GUEST);
		bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;

		/* VM-entry interruption-info field: interruption type */
		if (intr_type == INTR_TYPE_RESERVED ||
		    (intr_type == INTR_TYPE_OTHER_EVENT &&
		     !nested_cpu_supports_monitor_trap_flag(vcpu)))
			return -EINVAL;

		/* VM-entry interruption-info field: vector */
		if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
		    (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
		    (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
			return -EINVAL;

		/* VM-entry interruption-info field: deliver error code */
		should_have_error_code =
			intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
			x86_exception_has_error_code(vector);
		if (has_error_code != should_have_error_code)
			return -EINVAL;

		/* VM-entry exception error code */
		if (has_error_code &&
		    vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
			return -EINVAL;

		/* VM-entry interruption-info field: reserved bits */
		if (intr_info & INTR_INFO_RESVD_BITS_MASK)
			return -EINVAL;

		/* VM-entry instruction length */
		switch (intr_type) {
		case INTR_TYPE_SOFT_EXCEPTION:
		case INTR_TYPE_SOFT_INTR:
		case INTR_TYPE_PRIV_SW_EXCEPTION:
			if ((vmcs12->vm_entry_instruction_len > 15) ||
			    (vmcs12->vm_entry_instruction_len == 0 &&
			     !nested_cpu_has_zero_length_injection(vcpu)))
				return -EINVAL;
		}
	}

	if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12)
{
	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
	    nested_check_vm_exit_controls(vcpu, vmcs12) ||
	    nested_check_vm_entry_controls(vcpu, vmcs12))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	bool ia32e;

	if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
	    !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
	    !nested_cr3_valid(vcpu, vmcs12->host_cr3))
		return -EINVAL;

	if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) ||
	    is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
		return -EINVAL;

	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
	    !kvm_pat_valid(vmcs12->host_ia32_pat))
		return -EINVAL;

	/*
	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
	 * the values of the LMA and LME bits in the field must each be that of
	 * the host address-space size VM-exit control.
	 */
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
		ia32e = (vmcs12->vm_exit_controls &
			 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
		if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
			return -EINVAL;
	}

	return 0;
}
static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
					  struct vmcs12 *vmcs12)
{
	int r = 0;
	struct vmcs12 *shadow;
	struct kvm_host_map map;

	if (vmcs12->vmcs_link_pointer == -1ull)
		return 0;

	if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
		return -EINVAL;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
		return -EINVAL;

	shadow = map.hva;

	if (shadow->hdr.revision_id != VMCS12_REVISION ||
	    shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
		r = -EINVAL;

	kvm_vcpu_unmap(vcpu, &map, false);
	return r;
}
/*
 * Checks related to Guest Non-register State
 */
static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
{
	if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
					struct vmcs12 *vmcs12,
					u32 *exit_qual)
{
	bool ia32e;

	*exit_qual = ENTRY_FAIL_DEFAULT;

	if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
	    !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
		return -EINVAL;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
	    !kvm_pat_valid(vmcs12->guest_ia32_pat))
		return -EINVAL;

	if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
		*exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
		return -EINVAL;
	}

	/*
	 * If the load IA32_EFER VM-entry control is 1, the following checks
	 * are performed on the field for the IA32_EFER MSR:
	 * - Bits reserved in the IA32_EFER MSR must be 0.
	 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
	 *   the IA-32e mode guest VM-entry control. It must also be identical
	 *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
	 *   CR0.PG) is 1.
	 */
	if (to_vmx(vcpu)->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
		if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
		    ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
		    ((vmcs12->guest_cr0 & X86_CR0_PG) &&
		     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
			return -EINVAL;
	}

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
	    (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
	     (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
		return -EINVAL;

	if (nested_check_guest_non_reg_state(vmcs12))
		return -EINVAL;

	return 0;
}
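/*
 * Perform an early consistency check on vmcs02 by attempting an actual
 * VMEnter with the to-be-checked state: the GUEST_RFLAGS write below
 * guarantees an immediate consistency-check VMExit (or VMFail) before any
 * guest instruction runs, so misconfigurations are detected by hardware
 * rather than by emulated checks.
 */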
static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long cr3, cr4;
	bool vm_fail;

	if (!nested_early_check)
		return 0;

	if (vmx->msr_autoload.host.nr)
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	if (vmx->msr_autoload.guest.nr)
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

	preempt_disable();

	vmx_prepare_switch_to_guest(vcpu);

	/*
	 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
	 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
	 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
	 * there is no need to preserve other bits or save/restore the field.
	 */
	vmcs_writel(GUEST_RFLAGS, 0);

	cr3 = __get_current_cr3_fast();
	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
		vmcs_writel(HOST_CR3, cr3);
		vmx->loaded_vmcs->host_state.cr3 = cr3;
	}

	cr4 = cr4_read_shadow();
	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
		vmcs_writel(HOST_CR4, cr4);
		vmx->loaded_vmcs->host_state.cr4 = cr4;
	}

	asm(
		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
		"cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
		"je 1f \n\t"
		__ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
		"mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
		"1: \n\t"
		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */

		/* Check if vmlaunch or vmresume is needed */
		"cmpb $0, %c[launched](%[loaded_vmcs])\n\t"

		/*
		 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
		 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
		 * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the
		 * result of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
		 */
		"call vmx_vmenter\n\t"

		CC_SET(be)
	      : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
	      :	[HOST_RSP]"r"((unsigned long)HOST_RSP),
		[loaded_vmcs]"r"(vmx->loaded_vmcs),
		[launched]"i"(offsetof(struct loaded_vmcs, launched)),
		[host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
		[wordsize]"i"(sizeof(ulong))
	      : "memory", "cc"
	);

	preempt_enable();

	if (vmx->msr_autoload.host.nr)
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	if (vmx->msr_autoload.guest.nr)
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

	if (vm_fail) {
		WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
			     VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	/*
	 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
	 */
	local_irq_enable();
	if (hw_breakpoint_active())
		set_debugreg(__this_cpu_read(cpu_dr7), 7);

	/*
	 * A non-failing VMEntry means we somehow entered guest mode with
	 * an illegal RIP, and that's just the tip of the iceberg. There
	 * is no telling what memory has been modified or what state has
	 * been exposed to unknown code. Hitting this all but guarantees
	 * a (very critical) hardware issue.
	 */
	WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
		  VMX_EXIT_REASONS_FAILED_VMENTRY));

	return 0;
}
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12);

static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_host_map *map;
	struct page *page;
	u64 hpa;

	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
		/*
		 * Translate L1 physical address to host physical
		 * address for vmcs02. Keep the page pinned, so this
		 * physical address remains valid. We keep a reference
		 * to it so we can release it later.
		 */
		if (vmx->nested.apic_access_page) { /* shouldn't happen */
			kvm_release_page_dirty(vmx->nested.apic_access_page);
			vmx->nested.apic_access_page = NULL;
		}
		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
		/*
		 * If translation failed, no matter: This feature asks
		 * to exit when accessing the given address, and if it
		 * can never be accessed, this feature won't do
		 * anything anyway.
		 */
		if (!is_error_page(page)) {
			vmx->nested.apic_access_page = page;
			hpa = page_to_phys(vmx->nested.apic_access_page);
			vmcs_write64(APIC_ACCESS_ADDR, hpa);
		} else {
			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
					SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
		}
	}

	if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
		map = &vmx->nested.virtual_apic_map;

		/*
		 * If translation failed, VM entry will fail because
		 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
		 */
		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
		} else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
			   nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
			   !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
			/*
			 * The processor will never use the TPR shadow, simply
			 * clear the bit from the execution control. Such a
			 * configuration is useless, but it happens in tests.
			 * For any other configuration, failing the vm entry is
			 * _not_ what the processor does but it's basically the
			 * only possibility we have.
			 */
			vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
					CPU_BASED_TPR_SHADOW);
		} else {
			printk("bad virtual-APIC page address\n");
			dump_vmcs();
		}
	}

	if (nested_cpu_has_posted_intr(vmcs12)) {
		map = &vmx->nested.pi_desc_map;

		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
			vmx->nested.pi_desc =
				(struct pi_desc *)(((void *)map->hva) +
				offset_in_page(vmcs12->posted_intr_desc_addr));
			vmcs_write64(POSTED_INTR_DESC_ADDR,
				     pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
		}
	}
	if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
		vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
			      CPU_BASED_USE_MSR_BITMAPS);
	else
		vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
				CPU_BASED_USE_MSR_BITMAPS);
}
/*
 * Intel's VMX Instruction Reference specifies a common set of prerequisites
 * for running VMX instructions (except VMXON, whose prerequisites are
 * slightly different). It also specifies what exception to inject otherwise.
 * Note that many of these exceptions have priority over VM exits, so they
 * don't have to be checked again here.
 */
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
	if (!to_vmx(vcpu)->nested.vmxon) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 0;
	}

	return 1;
}
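/*
 * With APICv, an interrupt in RVI is deliverable only when its priority
 * class (the upper nibble of the vector) is higher than the processor
 * priority class in VPPR.
 */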
static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
{
	u8 rvi = vmx_get_rvi();
	u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);

	return ((rvi & 0xf0) > (vppr & 0xf0));
}

static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12);
 * If from_vmentry is false, this is being called from state restore (either RSM
 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
 *
 * Returns:
 *	 0 - success, i.e. proceed with actual VMEnter
 *	 1 - consistency check VMExit
 *	-1 - consistency check VMFail
 */
int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	bool evaluate_pending_interrupts;
	u32 exit_reason = EXIT_REASON_INVALID_STATE;
	u32 exit_qual;

	evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
		(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);

	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	if (kvm_mpx_supported() &&
	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
		vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);

	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);

	prepare_vmcs02_early(vmx, vmcs12);

	if (from_vmentry) {
		nested_get_vmcs12_pages(vcpu);

		if (nested_vmx_check_vmentry_hw(vcpu)) {
			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
			return -1;
		}

		if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
			goto vmentry_fail_vmexit;
	}

	enter_guest_mode(vcpu);
	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
		vcpu->arch.tsc_offset += vmcs12->tsc_offset;

	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
		goto vmentry_fail_vmexit_guest_mode;

	if (from_vmentry) {
		exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
		exit_qual = nested_vmx_load_msr(vcpu,
						vmcs12->vm_entry_msr_load_addr,
						vmcs12->vm_entry_msr_load_count);
		if (exit_qual)
			goto vmentry_fail_vmexit_guest_mode;
	} else {
		/*
		 * The MMU is not initialized to point at the right entities yet and
		 * "get pages" would need to read data from the guest (i.e. we will
		 * need to perform gpa to hpa translation). Request a call
		 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
		 * have already been set at vmentry time and should not be reset.
		 */
		kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
	}

	/*
	 * If L1 had a pending IRQ/NMI until it executed
	 * VMLAUNCH/VMRESUME which wasn't delivered because it was
	 * disallowed (e.g. interrupts disabled), L0 needs to
	 * evaluate if this pending event should cause an exit from L2
	 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
	 * intercept EXTERNAL_INTERRUPT).
	 *
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request, or checking RVI during evaluation of
	 * pending virtual interrupts. However, this setting was done
	 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
	 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
	 */
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, vcpu);

	/*
	 * Do not start the preemption timer hrtimer until after we know
	 * we are successful, so that only nested_vmx_vmexit needs to cancel
	 * the timer.
	 */
	vmx->nested.preemption_timer_expired = false;
	if (nested_cpu_has_preemption_timer(vmcs12))
		vmx_start_preemption_timer(vcpu);

	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
	 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
	 * returned as far as L1 is concerned. It will only return (and set
	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
	 */
	return 0;

	/*
	 * A failed consistency check that leads to a VMExit during L1's
	 * VMEnter to L2 is a variation of a normal VMexit, as explained in
	 * 26.7 "VM-entry failures during or after loading guest state".
	 */
vmentry_fail_vmexit_guest_mode:
	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
	leave_guest_mode(vcpu);

vmentry_fail_vmexit:
	vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	if (!from_vmentry)
		return 1;

	load_vmcs12_host_state(vcpu, vmcs12);
	vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
	vmcs12->exit_qualification = exit_qual;
	if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
		vmx->nested.need_vmcs12_sync = true;
	return 1;
}
 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
 * for running an L2 nested guest.
 */
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
	struct vmcs12 *vmcs12;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
	int ret;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true))
		return 1;

	if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
		return nested_vmx_failInvalid(vcpu);

	vmcs12 = get_vmcs12(vcpu);

	/*
	 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
	 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
	 * rather than RFLAGS.ZF, and no error number is stored to the
	 * VM-instruction error field.
	 */
	if (vmcs12->hdr.shadow_vmcs)
		return nested_vmx_failInvalid(vcpu);

	if (vmx->nested.hv_evmcs) {
		copy_enlightened_to_vmcs12(vmx);
		/* Enlightened VMCS doesn't have launch state */
		vmcs12->launch_state = !launch;
	} else if (enable_shadow_vmcs) {
		copy_shadow_to_vmcs12(vmx);
	}

	/*
	 * The nested entry process starts with enforcing various prerequisites
	 * on vmcs12 as required by the Intel SDM, and acts appropriately when
	 * they fail: as the SDM explains, some conditions should cause the
	 * instruction to fail, while others will cause the instruction to seem
	 * to succeed, but return an EXIT_REASON_INVALID_STATE.
	 * To speed up the normal (success) code path, we should avoid checking
	 * for misconfigurations which will anyway be caught by the processor
	 * when using the merged vmcs02.
	 */
	if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
		return nested_vmx_failValid(vcpu,
			VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);

	if (vmcs12->launch_state == launch)
		return nested_vmx_failValid(vcpu,
			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);

	if (nested_vmx_check_controls(vcpu, vmcs12))
		return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);

	if (nested_vmx_check_host_state(vcpu, vmcs12))
		return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);

	/*
	 * We're finally done with prerequisite checking, and can start with
	 * the nested entry.
	 */
	vmx->nested.nested_run_pending = 1;
	ret = nested_vmx_enter_non_root_mode(vcpu, true);
	vmx->nested.nested_run_pending = !ret;
	if (ret > 0)
		return 1;
	else if (ret)
		return nested_vmx_failValid(vcpu,
			VMXERR_ENTRY_INVALID_CONTROL_FIELD);

	/* Hide L1D cache contents from the nested guest. */
	vmx->vcpu.arch.l1tf_flush_l1d = true;

	/*
	 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
	 * also be used as part of restoring nVMX state for
	 * snapshot restore (migration).
	 *
	 * In this flow, it is assumed that the vmcs12 cache was
	 * transferred as part of captured nVMX state and should
	 * therefore not be read from guest memory (which may not
	 * exist on the destination host yet).
	 */
	nested_cache_shadow_vmcs12(vcpu, vmcs12);

	/*
	 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
	 * awakened by event injection or by an NMI-window VM-exit or
	 * by an interrupt-window VM-exit, halt the vcpu.
	 */
	if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
	    !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
	    !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
	    !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
	      (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
		vmx->nested.nested_run_pending = 0;
		return kvm_vcpu_halt(vcpu);
	}
	return 1;
}
 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
 * This function returns the new value we should put in vmcs12.guest_cr0.
 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
 *    available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
 *    didn't trap the bit, because if L1 did, so would L0).
 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
 *    been modified by L2, and L1 knows it. So just leave the old value of
 *    the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
 *    isn't relevant, because if L0 traps this bit it can set it to anything.
 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
 *    changed these bits, and therefore they need to be updated, but L0
 *    didn't necessarily allow them to be changed in GUEST_CR0 - and rather
 *    put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
 */
static inline unsigned long
vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
	/*3*/	(vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
			vcpu->arch.cr0_guest_owned_bits));
}

static inline unsigned long
vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
	/*3*/	(vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
			vcpu->arch.cr4_guest_owned_bits));
}
static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
				      struct vmcs12 *vmcs12)
{
	u32 idt_vectoring;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (kvm_exception_is_soft(nr)) {
			vmcs12->vm_exit_instruction_len =
				vcpu->arch.event_exit_inst_len;
			idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
		} else {
			idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
		}

		if (vcpu->arch.exception.has_error_code) {
			idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
			vmcs12->idt_vectoring_error_code =
				vcpu->arch.exception.error_code;
		}

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	} else if (vcpu->arch.nmi_injected) {
		vmcs12->idt_vectoring_info_field =
			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (vcpu->arch.interrupt.soft) {
			idt_vectoring |= INTR_TYPE_SOFT_INTR;
			vmcs12->vm_entry_instruction_len =
				vcpu->arch.event_exit_inst_len;
		} else {
			idt_vectoring |= INTR_TYPE_EXT_INTR;
		}

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	}
}
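/*
 * Pages written by the CPU on behalf of L2 (the virtual-APIC page and the
 * posted-interrupt descriptor) bypass the regular dirty tracking, so mark
 * them dirty explicitly for the benefit of live migration.
 */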
static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	gfn_t gfn;

	/*
	 * Don't need to mark the APIC access page dirty; it is never
	 * written to by the CPU during APIC virtualization.
	 */

	if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
		gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}

	if (nested_cpu_has_posted_intr(vmcs12)) {
		gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}
}
static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int max_irr;
	void *vapic_page;
	u16 status;

	if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
		return;

	vmx->nested.pi_pending = false;
	if (!pi_test_and_clear_on(vmx->nested.pi_desc))
		return;

	max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
	if (max_irr != 256) {
		vapic_page = vmx->nested.virtual_apic_map.hva;
		if (!vapic_page)
			return;

		__kvm_apic_update_irr(vmx->nested.pi_desc->pir,
				      vapic_page, &max_irr);
		status = vmcs_read16(GUEST_INTR_STATUS);
		if ((u8)max_irr > ((u8)status & 0xff)) {
			status &= ~0xff;
			status |= (u8)max_irr;
			vmcs_write16(GUEST_INTR_STATUS, status);
		}
	}

	nested_mark_vmcs12_pages_dirty(vcpu);
}
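/*
 * Synthesize an EXIT_REASON_EXCEPTION_NMI VM-exit to L1 for an exception
 * that L1 intercepts, transferring the vector, type and error code into the
 * exit interruption-information fields of vmcs12.
 */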
static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
					       unsigned long exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (vcpu->arch.exception.has_error_code) {
		vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (kvm_exception_is_soft(nr))
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
	    vmx_get_nmi_mask(vcpu))
		intr_info |= INTR_INFO_UNBLOCK_NMI;

	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
}
static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long exit_qual;
	bool block_nested_events =
		vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);

	if (vcpu->arch.exception.pending &&
	    nested_vmx_check_exception(vcpu, &exit_qual)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
		return 0;
	}

	if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
	    vmx->nested.preemption_timer_expired) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
		return 0;
	}

	if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  NMI_VECTOR | INTR_TYPE_NMI_INTR |
				  INTR_INFO_VALID_MASK, 0);
		/*
		 * The NMI-triggered VM exit counts as injection:
		 * clear this one and block further NMIs.
		 */
		vcpu->arch.nmi_pending = 0;
		vmx_set_nmi_mask(vcpu, true);
		return 0;
	}

	if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
	    nested_exit_on_intr(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
		return 0;
	}

	vmx_complete_nested_posted_interrupt(vcpu);
	return 0;
}
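/*
 * Convert the hrtimer's remaining time back into guest preemption-timer
 * units: ns * tsc_khz / 10^6 yields TSC cycles, which are then scaled down
 * by 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE.
 */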
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
	ktime_t remaining =
		hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
	u64 value;

	if (ktime_to_ns(remaining) <= 0)
		return 0;

	value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
	do_div(value, 1000000);
	return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
}
/*
 * Update the guest state fields of vmcs12 to reflect changes that
 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
 * VM-entry controls is also updated, since this is really a guest
 * state bit.)
 */
static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);

	vmcs12->guest_rsp = kvm_rsp_read(vcpu);
	vmcs12->guest_rip = kvm_rip_read(vcpu);
	vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);

	vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
	vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
	vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
	vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
	vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
	vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
	vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
	vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
	vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
	vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
	vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
	vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
	vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
	vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
	vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
	vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
	vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
	vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
	vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
	vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
	vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
	vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
	vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
	vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
	vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
	vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
	vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
	vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
	vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
	vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
	vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
	vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
	vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
	vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);

	vmcs12->guest_interruptibility_info =
		vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	vmcs12->guest_pending_dbg_exceptions =
		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
		vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
	else
		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;

	if (nested_cpu_has_preemption_timer(vmcs12) &&
	    vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
		vmcs12->vmx_preemption_timer_value =
			vmx_get_preemption_timer_value(vcpu);

	/*
	 * In some cases (usually, nested EPT), L2 is allowed to change its
	 * own CR3 without exiting. If it has changed it, we must keep it.
	 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
	 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
	 *
	 * Additionally, restore L2's PDPTR to vmcs12.
	 */
	if (enable_ept) {
		vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
		vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
		vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
		vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
		vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
	}

	vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);

	if (nested_cpu_has_vid(vmcs12))
		vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);

	vmcs12->vm_entry_controls =
		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
		kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
		vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	}

	/* TODO: These cannot have changed unless we have MSR bitmaps and
	 * the relevant bit asks not to trap the change */
	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
		vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
		vmcs12->guest_ia32_efer = vcpu->arch.efer;
	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
	if (kvm_mpx_supported())
		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
}
3506 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
3507 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
3508 * and this function updates it to reflect the changes to the guest state while
3509 * L2 was running (and perhaps made some exits which were handled directly by L0
3510 * without going back to L1), and to reflect the exit reason.
3511 * Note that we do not have to copy here all VMCS fields, just those that
3512 * could have changed by the L2 guest or the exit - i.e., the guest-state and
3513 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
3514 * which already writes to vmcs12 directly.
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			   u32 exit_reason, u32 exit_intr_info,
			   unsigned long exit_qualification)
{
	/* update guest state fields: */
	sync_vmcs12(vcpu, vmcs12);

	/* update exit information fields: */

	vmcs12->vm_exit_reason = exit_reason;
	vmcs12->exit_qualification = exit_qualification;
	vmcs12->vm_exit_intr_info = exit_intr_info;

	vmcs12->idt_vectoring_info_field = 0;
	vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);

	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		vmcs12->launch_state = 1;

		/* vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value. */
		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;

		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
		vmcs12_save_pending_event(vcpu, vmcs12);

		/*
		 * According to spec, there's no need to store the guest's
		 * MSRs if the exit is due to a VM-entry failure that occurs
		 * during or after loading the guest state. Since this exit
		 * does not fall in that category, we need to save the MSRs.
		 */
		if (nested_vmx_store_msr(vcpu,
					 vmcs12->vm_exit_msr_store_addr,
					 vmcs12->vm_exit_msr_store_count))
			nested_vmx_abort(vcpu,
					 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
	}

	/*
	 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
	 * preserved above and would only end up incorrectly in L1.
	 */
	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);
}
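/*
 * Editor's note (illustrative, not from the original source): bit 31 of the
 * exit reason (VMX_EXIT_REASONS_FAILED_VMENTRY, 0x80000000) marks a failed
 * VM-entry. For example, a VM-entry failure due to invalid guest state is
 * reported as EXIT_REASON_INVALID_STATE | 0x80000000, in which case
 * prepare_vmcs12() above deliberately skips launch_state and the guest-MSR
 * store, as the SDM requires.
 */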
/*
 * A part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent, is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is to be called not only on normal nested exit, but also on
 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
 * Failures During or After Loading Guest State").
 * This function should be called when the active VMCS is L1's (vmcs01).
 */
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12)
{
	struct kvm_segment seg;
	u32 entry_failure_code;

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vcpu->arch.efer = vmcs12->host_ia32_efer;
	else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
	vmx_set_efer(vcpu, vcpu->arch.efer);

	kvm_rsp_write(vcpu, vmcs12->host_rsp);
	kvm_rip_write(vcpu, vmcs12->host_rip);
	vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
	vmx_set_interrupt_shadow(vcpu, 0);

	/*
	 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
	 * actually changed, because vmx_set_cr0 refers to efer set above.
	 *
	 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
	 * (KVM doesn't change it);
	 */
	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
	vmx_set_cr0(vcpu, vmcs12->host_cr0);

	/* Same as above - no reason to call set_cr4_guest_host_mask(). */
	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
	vmx_set_cr4(vcpu, vmcs12->host_cr4);

	nested_ept_uninit_mmu_context(vcpu);

	/*
	 * Only PDPTE load can fail as the value of cr3 was checked on entry and
	 * couldn't have changed.
	 */
	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);

	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;

	/*
	 * If vmcs01 doesn't use VPID, CPU flushes TLB on every
	 * VMEntry/VMExit. Thus, no need to flush TLB.
	 *
	 * If vmcs12 doesn't use VPID, L1 expects TLB to be
	 * flushed on every VMEntry/VMExit.
	 *
	 * Otherwise, we can preserve TLB entries as long as we are
	 * able to tag L1 TLB entries differently than L2 TLB entries.
	 *
	 * If vmcs12 uses EPT, we need to execute this flush on EPTP01
	 * and therefore we request the TLB flush to happen only after VMCS EPTP
	 * has been set by KVM_REQ_LOAD_CR3.
	 */
	if (enable_vpid &&
	    (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}

	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);

	/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
	if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
		vmcs_write64(GUEST_BNDCFGS, 0);

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
		vcpu->arch.pat = vmcs12->host_ia32_pat;
	}
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
			     vmcs12->host_ia32_perf_global_ctrl);

	/* Set L1 segment info according to Intel SDM
	   27.5.2 Loading Host Segment and Descriptor-Table Registers */
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.selector = vmcs12->host_cs_selector,
		.type = 11,
		.present = 1,
		.s = 1,
		.g = 1
	};
	if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		seg.l = 1;
	else
		seg.db = 1;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.type = 3,
		.present = 1,
		.s = 1,
		.db = 1,
		.g = 1
	};
	seg.selector = vmcs12->host_ds_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
	seg.selector = vmcs12->host_es_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
	seg.selector = vmcs12->host_ss_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
	seg.selector = vmcs12->host_fs_selector;
	seg.base = vmcs12->host_fs_base;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
	seg.selector = vmcs12->host_gs_selector;
	seg.base = vmcs12->host_gs_base;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
	seg = (struct kvm_segment) {
		.base = vmcs12->host_tr_base,
		.limit = 0x67,
		.selector = vmcs12->host_tr_selector,
		.type = 11,
		.present = 1
	};
	vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);

	kvm_set_dr(vcpu, 7, 0x400);
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	if (cpu_has_vmx_msr_bitmap())
		vmx_update_msr_bitmap(vcpu);

	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
				vmcs12->vm_exit_msr_load_count))
		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}
static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
{
	struct shared_msr_entry *efer_msr;
	unsigned int i;

	if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
		return vmcs_read64(GUEST_IA32_EFER);

	if (cpu_has_load_ia32_efer())
		return host_efer;

	for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
		if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
			return vmx->msr_autoload.guest.val[i].value;
	}

	efer_msr = find_msr_entry(vmx, MSR_EFER);
	if (WARN_ON_ONCE(!efer_msr))
		return host_efer;

	return efer_msr->data;
}
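/*
 * Editor's note (illustrative, not from the original source):
 * nested_vmx_get_vmcs01_guest_efer() above recovers L1's EFER from whichever
 * mechanism vmcs01 actually used, in priority order: the VM-entry EFER load
 * control, the hardware "load IA32_EFER" shortcut (host_efer), the MSR
 * autoload list, and finally KVM's shared-MSR bookkeeping.
 */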
static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_msr_entry g, h;
	struct msr_data msr;
	gpa_t gpa;
	u32 i, j;

	vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);

	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
		/*
		 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
		 * as vmcs01.GUEST_DR7 contains a userspace defined value
		 * and vcpu->arch.dr7 is not squirreled away before the
		 * nested VMENTER (not worth adding a variable in nested_vmx).
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			kvm_set_dr(vcpu, 7, DR7_FIXED_1);
		else
			WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
	}

	/*
	 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
	 * handle a variety of side effects to KVM's software model.
	 */
	vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));

	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
	vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));

	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
	vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));

	nested_ept_uninit_mmu_context(vcpu);

	/*
	 * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3
	 * points to shadow pages!  Fortunately we only get here after a WARN_ON
	 * if EPT is disabled, so a VMabort is perfectly fine.
	 */
	if (enable_ept) {
		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
		__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
	} else {
		nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED);
	}

	/*
	 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
	 * from vmcs01 (if necessary).  The PDPTRs are not loaded on
	 * VMFail, like everything else we just need to ensure our
	 * software model is up-to-date.
	 */
	ept_save_pdptrs(vcpu);

	kvm_mmu_reset_context(vcpu);

	if (cpu_has_vmx_msr_bitmap())
		vmx_update_msr_bitmap(vcpu);

	/*
	 * This nasty bit of open coding is a compromise between blindly
	 * loading L1's MSRs using the exit load lists (incorrect emulation
	 * of VMFail), leaving the nested VM's MSRs in the software model
	 * (incorrect behavior) and snapshotting the modified MSRs (too
	 * expensive since the lists are unbound by hardware).  For each
	 * MSR that was (prematurely) loaded from the nested VMEntry load
	 * list, reload it from the exit load list if it exists and differs
	 * from the guest value.  The intent is to stuff host state as
	 * silently as possible, not to fully process the exit load list.
	 */
	msr.host_initiated = false;
	for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
		gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
		if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
			pr_debug_ratelimited(
				"%s read MSR index failed (%u, 0x%08llx)\n",
				__func__, i, gpa);
			goto vmabort;
		}

		for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
			gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
			if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
				pr_debug_ratelimited(
					"%s read MSR failed (%u, 0x%08llx)\n",
					__func__, j, gpa);
				goto vmabort;
			}
			if (h.index != g.index)
				continue;
			if (h.value == g.value)
				break;

			if (nested_vmx_load_msr_check(vcpu, &h)) {
				pr_debug_ratelimited(
					"%s check failed (%u, 0x%x, 0x%x)\n",
					__func__, j, h.index, h.reserved);
				goto vmabort;
			}

			msr.index = h.index;
			msr.data = h.value;
			if (kvm_set_msr(vcpu, &msr)) {
				pr_debug_ratelimited(
					"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
					__func__, j, h.index, h.value);
				goto vmabort;
			}
			break;
		}
	}

	return;

vmabort:
	nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}
/*
 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
 * and modify vmcs12 to make it see what it would expect to see there if
 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
 */
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	/* trying to cancel vmlaunch/vmresume is a bug */
	WARN_ON_ONCE(vmx->nested.nested_run_pending);

	leave_guest_mode(vcpu);

	if (nested_cpu_has_preemption_timer(vmcs12))
		hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);

	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;

	if (likely(!vmx->fail)) {
		if (exit_reason == -1)
			sync_vmcs12(vcpu, vmcs12);
		else
			prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
				       exit_qualification);

		/*
		 * Must happen outside of sync_vmcs12() as it will
		 * also be used to capture vmcs12 cache as part of
		 * capturing nVMX state for snapshot (migration).
		 *
		 * Otherwise, this flush will dirty guest memory at a
		 * point it is already assumed by user-space to be
		 * immutable.
		 */
		nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
	} else {
		/*
		 * The only expected VM-instruction error is "VM entry with
		 * invalid control field(s)." Anything else indicates a
		 * problem with L0.  And we should never get here with a
		 * VMFail of any type if early consistency checks are enabled.
		 */
		WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
			     VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		WARN_ON_ONCE(nested_early_check);
	}

	vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	/* Update any VMCS fields that might have changed while L2 ran */
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);

	if (kvm_has_tsc_control)
		decache_tsc_multiplier(vmx);

	if (vmx->nested.change_vmcs01_virtual_apic_mode) {
		vmx->nested.change_vmcs01_virtual_apic_mode = false;
		vmx_set_virtual_apic_mode(vcpu);
	} else if (!nested_cpu_has_ept(vmcs12) &&
		   nested_cpu_has2(vmcs12,
				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
		vmx_flush_tlb(vcpu, true);
	}

	/* Unpin physical memory we referred to in vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_dirty(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	/*
	 * We are now running in L2, mmu_notifier will force to reload the
	 * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1.
	 */
	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

	if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
		vmx->nested.need_vmcs12_sync = true;

	/* in case we halted in L2 */
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	if (likely(!vmx->fail)) {
		/*
		 * TODO: SDM says that with acknowledge interrupt on
		 * exit, bit 31 of the VM-exit interrupt information
		 * (valid interrupt) is always set to 1 on
		 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
		 * need kvm_cpu_has_interrupt().  See the commit
		 * message for details.
		 */
		if (nested_exit_intr_ack_set(vcpu) &&
		    exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
		    kvm_cpu_has_interrupt(vcpu)) {
			int irq = kvm_cpu_get_interrupt(vcpu);
			WARN_ON(irq < 0);
			vmcs12->vm_exit_intr_info = irq |
				INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
		}

		if (exit_reason != -1)
			trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
						       vmcs12->exit_qualification,
						       vmcs12->idt_vectoring_info_field,
						       vmcs12->vm_exit_intr_info,
						       vmcs12->vm_exit_intr_error_code,
						       KVM_ISA_VMX);

		load_vmcs12_host_state(vcpu, vmcs12);

		return;
	}

	/*
	 * After an early L2 VM-entry failure, we're now back
	 * in L1 which thinks it just finished a VMLAUNCH or
	 * VMRESUME instruction, so we need to set the failure
	 * flag and the VM-instruction error field of the VMCS
	 * accordingly, and skip the emulated instruction.
	 */
	(void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);

	/*
	 * Restore L1's host state to KVM's software model.  We're here
	 * because a consistency check was caught by hardware, which
	 * means some amount of guest state has been propagated to KVM's
	 * model and needs to be unwound to the host's state.
	 */
	nested_vmx_restore_host_state(vcpu);

	vmx->fail = 0;
}
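/*
 * Editor's note (illustrative, not from the original source): an exit_reason
 * of -1 is a KVM-internal sentinel, used e.g. by vmx_leave_nested() later in
 * this file via nested_vmx_vmexit(vcpu, -1, 0, 0). In that case only
 * sync_vmcs12() runs - the exit-information fields are not updated and no
 * vmexit is traced or injected into L1 - though L1's host state is still
 * loaded.
 */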
/*
 * Decode the memory-address operand of a vmx instruction, as recorded on an
 * exit caused by such an instruction (run by a guest hypervisor).
 * On success, returns 0. When the operand is invalid, returns 1 and throws
 * #UD or #GP.
 */
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, gva_t *ret)
{
	gva_t off;
	bool exn;
	struct kvm_segment s;

	/*
	 * According to Vol. 3B, "Information for VM Exits Due to Instruction
	 * Execution", on an exit, vmx_instruction_info holds most of the
	 * addressing components of the operand. Only the displacement part
	 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
	 * For how an actual address is calculated from all these components,
	 * refer to Vol. 1, "Operand Addressing".
	 */
	int  scaling = vmx_instruction_info & 3;
	int  addr_size = (vmx_instruction_info >> 7) & 7;
	bool is_reg = vmx_instruction_info & (1u << 10);
	int  seg_reg = (vmx_instruction_info >> 15) & 7;
	int  index_reg = (vmx_instruction_info >> 18) & 0xf;
	bool index_is_valid = !(vmx_instruction_info & (1u << 22));
	int  base_reg       = (vmx_instruction_info >> 23) & 0xf;
	bool base_is_valid  = !(vmx_instruction_info & (1u << 27));

	if (is_reg) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/* Addr = segment_base + offset */
	/* offset = base + [index * scale] + displacement */
	off = exit_qualification; /* holds the displacement */
	if (addr_size == 1)
		off = (gva_t)sign_extend64(off, 31);
	else if (addr_size == 0)
		off = (gva_t)sign_extend64(off, 15);
	if (base_is_valid)
		off += kvm_register_read(vcpu, base_reg);
	if (index_is_valid)
		off += kvm_register_read(vcpu, index_reg)<<scaling;
	vmx_get_segment(vcpu, &s, seg_reg);

	/*
	 * The effective address, i.e. @off, of a memory operand is truncated
	 * based on the address size of the instruction.  Note that this is
	 * the *effective address*, i.e. the address prior to accounting for
	 * the segment's base.
	 */
	if (addr_size == 1) /* 32 bit */
		off &= 0xffffffff;
	else if (addr_size == 0) /* 16 bit */
		off &= 0xffff;

	/* Checks for #GP/#SS exceptions. */
	exn = false;
	if (is_long_mode(vcpu)) {
		/*
		 * The virtual/linear address is never truncated in 64-bit
		 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
		 * address when using FS/GS with a non-zero base.
		 */
		*ret = s.base + off;

		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
		 * non-canonical form. This is the only check on the memory
		 * destination for long mode!
		 */
		exn = is_noncanonical_address(*ret, vcpu);
	} else {
		/*
		 * When not in long mode, the virtual/linear address is
		 * unconditionally truncated to 32 bits regardless of the
		 * address size.
		 */
		*ret = (s.base + off) & 0xffffffff;

		/* Protected mode: apply checks for segment validity in the
		 * following order:
		 * - segment type check (#GP(0) may be thrown)
		 * - usability check (#GP(0)/#SS(0))
		 * - limit check (#GP(0)/#SS(0))
		 */
		if (wr)
			/* #GP(0) if the destination operand is located in a
			 * read-only data segment or any code segment.
			 */
			exn = ((s.type & 0xa) == 0 || (s.type & 8));
		else
			/* #GP(0) if the source operand is located in an
			 * execute-only code segment
			 */
			exn = ((s.type & 0xa) == 8);
		if (exn) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
			return 1;
		}
		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
		 */
		exn = (s.unusable != 0);

		/*
		 * Protected mode: #GP(0)/#SS(0) if the memory operand is
		 * outside the segment limit.  All CPUs that support VMX ignore
		 * limit checks for flat segments, i.e. segments with base==0,
		 * limit==0xffffffff and of type expand-up data or code.
		 */
		if (!(s.base == 0 && s.limit == 0xffffffff &&
		      ((s.type & 8) || !(s.type & 4))))
			exn = exn || (off + sizeof(u64) > s.limit);
	}
	if (exn) {
		kvm_queue_exception_e(vcpu,
				      seg_reg == VCPU_SREG_SS ?
						SS_VECTOR : GP_VECTOR,
				      0);
		return 1;
	}

	return 0;
}
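/*
 * Editor's note (illustrative worked example, not from the original source):
 * for "vmclear [rax]" in 64-bit mode, hardware reports vmx_instruction_info
 * of roughly 0x418100: scaling = 0, addr_size = 2 (64-bit, bits 9:7),
 * seg_reg = 3 (DS, bits 17:15), index invalid (bit 22 set), base_reg = 0
 * (RAX, bits 26:23) with base valid (bit 27 clear), so the code above
 * computes *ret = DS.base + RAX + displacement.
 */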
static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
{
	gva_t gva;
	struct x86_exception e;

	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
				vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
		return 1;

	if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}

	return 0;
}
/*
 * Allocate a shadow VMCS and associate it with the currently loaded
 * VMCS, unless such a shadow VMCS already exists. The newly allocated
 * VMCS is also VMCLEARed, so that it is ready for use.
 */
static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;

	/*
	 * We should allocate a shadow vmcs for vmcs01 only when L1
	 * executes VMXON and free it when L1 executes VMXOFF.
	 * As it is invalid to execute VMXON twice, we shouldn't reach
	 * here when vmcs01 already has an allocated shadow vmcs.
	 */
	WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);

	if (!loaded_vmcs->shadow_vmcs) {
		loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
		if (loaded_vmcs->shadow_vmcs)
			vmcs_clear(loaded_vmcs->shadow_vmcs);
	}
	return loaded_vmcs->shadow_vmcs;
}
static int enter_vmx_operation(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int r;

	r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
	if (r < 0)
		goto out_vmcs02;

	vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
	if (!vmx->nested.cached_vmcs12)
		goto out_cached_vmcs12;

	vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
	if (!vmx->nested.cached_shadow_vmcs12)
		goto out_cached_shadow_vmcs12;

	if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
		goto out_shadow_vmcs;

	hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;

	vmx->nested.vpid02 = allocate_vpid();

	vmx->nested.vmcs02_initialized = false;
	vmx->nested.vmxon = true;

	if (pt_mode == PT_MODE_HOST_GUEST) {
		vmx->pt_desc.guest.ctl = 0;
		pt_update_intercept_for_msr(vmx);
	}

	return 0;

out_shadow_vmcs:
	kfree(vmx->nested.cached_shadow_vmcs12);

out_cached_shadow_vmcs12:
	kfree(vmx->nested.cached_vmcs12);

out_cached_vmcs12:
	free_loaded_vmcs(&vmx->nested.vmcs02);

out_vmcs02:
	return -ENOMEM;
}
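/*
 * Editor's note (illustrative, not from the original source): the error path
 * above unwinds in strict reverse order of the allocations - a shadow-VMCS
 * failure frees both cached VMCS12 buffers and then vmcs02 - so each label
 * releases only what was successfully set up before the failing step.
 */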
/*
 * Emulate the VMXON instruction.
 * Currently, we just remember that VMX is active, and do not save or even
 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
 * do not currently need to store anything in that guest-allocated memory
 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
 * argument is different from the VMXON pointer (which the spec says they do).
 */
static int handle_vmon(struct kvm_vcpu *vcpu)
{
	int ret;
	gpa_t vmptr;
	uint32_t revision;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;

	/*
	 * The Intel VMX Instruction Reference lists a bunch of bits that are
	 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
	 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
	 * Otherwise, we should fail with #UD.  But most faulting conditions
	 * have already been checked by hardware, prior to the VM-exit for
	 * VMXON.  We do test guest cr4.VMXE because processor CR4 always has
	 * that bit set to 1 in non-root mode.
	 */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/* CPL=0 must be checked manually. */
	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (vmx->nested.vmxon)
		return nested_vmx_failValid(vcpu,
			VMXERR_VMXON_IN_VMX_ROOT_OPERATION);

	if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
			!= VMXON_NEEDED_FEATURES) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (nested_vmx_get_vmptr(vcpu, &vmptr))
		return 1;

	/*
	 * SDM 3: 24.11.5
	 * The first 4 bytes of VMXON region contain the supported
	 * VMCS revision identifier
	 *
	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
	 * which replaces physical address width with 32
	 */
	if (!page_address_valid(vcpu, vmptr))
		return nested_vmx_failInvalid(vcpu);

	if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
	    revision != VMCS12_REVISION)
		return nested_vmx_failInvalid(vcpu);

	vmx->nested.vmxon_ptr = vmptr;
	ret = enter_vmx_operation(vcpu);
	if (ret)
		return ret;

	return nested_vmx_succeed(vcpu);
}
static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->nested.current_vmptr == -1ull)
		return;

	if (enable_shadow_vmcs) {
		/* copy to memory all shadowed fields in case
		   they were modified */
		copy_shadow_to_vmcs12(vmx);
		vmx->nested.need_vmcs12_sync = false;
		vmx_disable_shadow_vmcs(vmx);
	}
	vmx->nested.posted_intr_nv = -1;

	/* Flush VMCS12 to guest memory */
	kvm_vcpu_write_guest_page(vcpu,
				  vmx->nested.current_vmptr >> PAGE_SHIFT,
				  vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	vmx->nested.current_vmptr = -1ull;
}
/* Emulate the VMXOFF instruction */
static int handle_vmoff(struct kvm_vcpu *vcpu)
{
	if (!nested_vmx_check_permission(vcpu))
		return 1;
	free_nested(vcpu);
	return nested_vmx_succeed(vcpu);
}
/* Emulate the VMCLEAR instruction */
static int handle_vmclear(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 zero = 0;
	gpa_t vmptr;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (nested_vmx_get_vmptr(vcpu, &vmptr))
		return 1;

	if (!page_address_valid(vcpu, vmptr))
		return nested_vmx_failValid(vcpu,
			VMXERR_VMCLEAR_INVALID_ADDRESS);

	if (vmptr == vmx->nested.vmxon_ptr)
		return nested_vmx_failValid(vcpu,
			VMXERR_VMCLEAR_VMXON_POINTER);

	if (vmx->nested.hv_evmcs_map.hva) {
		if (vmptr == vmx->nested.hv_evmcs_vmptr)
			nested_release_evmcs(vcpu);
	} else {
		if (vmptr == vmx->nested.current_vmptr)
			nested_release_vmcs12(vcpu);

		kvm_vcpu_write_guest(vcpu,
				     vmptr + offsetof(struct vmcs12,
						      launch_state),
				     &zero, sizeof(zero));
	}

	return nested_vmx_succeed(vcpu);
}
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);

/* Emulate the VMLAUNCH instruction */
static int handle_vmlaunch(struct kvm_vcpu *vcpu)
{
	return nested_vmx_run(vcpu, true);
}

/* Emulate the VMRESUME instruction */
static int handle_vmresume(struct kvm_vcpu *vcpu)
{
	return nested_vmx_run(vcpu, false);
}
static int handle_vmread(struct kvm_vcpu *vcpu)
{
	unsigned long field;
	u64 field_value;
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	gva_t gva = 0;
	struct vmcs12 *vmcs12;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
		return nested_vmx_failInvalid(vcpu);

	if (!is_guest_mode(vcpu))
		vmcs12 = get_vmcs12(vcpu);
	else {
		/*
		 * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
		 * to shadowed-field sets the ALU flags for VMfailInvalid.
		 */
		if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
			return nested_vmx_failInvalid(vcpu);
		vmcs12 = get_shadow_vmcs12(vcpu);
	}

	/* Decode instruction info and find the field to read */
	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
	/* Read the field, zero-extended to a u64 field_value */
	if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
		return nested_vmx_failValid(vcpu,
			VMXERR_UNSUPPORTED_VMCS_COMPONENT);

	/*
	 * Now copy part of this value to register or memory, as requested.
	 * Note that the number of bits actually copied is 32 or 64 depending
	 * on the guest's mode (32 or 64 bit), not on the given field's length.
	 */
	if (vmx_instruction_info & (1u << 10)) {
		kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
			field_value);
	} else {
		if (get_vmx_mem_address(vcpu, exit_qualification,
				vmx_instruction_info, true, &gva))
			return 1;
		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
		kvm_write_guest_virt_system(vcpu, gva, &field_value,
					    (is_long_mode(vcpu) ? 8 : 4), NULL);
	}

	return nested_vmx_succeed(vcpu);
}
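/*
 * Editor's note (illustrative example, not from the original source): bits
 * 31:28 of vmx_instruction_info name the register holding the field encoding
 * and bits 6:3 the destination register. For "vmread rax, rdx" (field
 * encoding in rdx, result to rax), bits 31:28 = 2 (RDX), bit 10 is set
 * (register operand) and bits 6:3 = 0 (RAX).
 */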
static int handle_vmwrite(struct kvm_vcpu *vcpu)
{
	unsigned long field;
	gva_t gva;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);

	/* The value to write might be 32 or 64 bits, depending on L1's long
	 * mode, and eventually we need to write that into a field of several
	 * possible lengths. The code below first zero-extends the value to 64
	 * bit (field_value), and then copies only the appropriate number of
	 * bits into the vmcs12 field.
	 */
	u64 field_value = 0;
	struct x86_exception e;
	struct vmcs12 *vmcs12;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (vmx->nested.current_vmptr == -1ull)
		return nested_vmx_failInvalid(vcpu);

	if (vmx_instruction_info & (1u << 10))
		field_value = kvm_register_readl(vcpu,
			(((vmx_instruction_info) >> 3) & 0xf));
	else {
		if (get_vmx_mem_address(vcpu, exit_qualification,
				vmx_instruction_info, false, &gva))
			return 1;
		if (kvm_read_guest_virt(vcpu, gva, &field_value,
					(is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
			kvm_inject_page_fault(vcpu, &e);
			return 1;
		}
	}


	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
	/*
	 * If the vCPU supports "VMWRITE to any supported field in the
	 * VMCS," then the "read-only" fields are actually read/write.
	 */
	if (vmcs_field_readonly(field) &&
	    !nested_cpu_has_vmwrite_any_field(vcpu))
		return nested_vmx_failValid(vcpu,
			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);

	if (!is_guest_mode(vcpu))
		vmcs12 = get_vmcs12(vcpu);
	else {
		/*
		 * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
		 * to shadowed-field sets the ALU flags for VMfailInvalid.
		 */
		if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
			return nested_vmx_failInvalid(vcpu);
		vmcs12 = get_shadow_vmcs12(vcpu);
	}

	if (vmcs12_write_any(vmcs12, field, field_value) < 0)
		return nested_vmx_failValid(vcpu,
			VMXERR_UNSUPPORTED_VMCS_COMPONENT);

	/*
	 * Do not track vmcs12 dirty-state if in guest-mode
	 * as we actually dirty shadow vmcs12 instead of vmcs12.
	 */
	if (!is_guest_mode(vcpu)) {
		switch (field) {
#define SHADOW_FIELD_RW(x) case x:
#include "vmcs_shadow_fields.h"
			/*
			 * The fields that can be updated by L1 without a vmexit are
			 * always updated in the vmcs02, the others go down the slow
			 * path of prepare_vmcs02.
			 */
			vmcs_writel(field, field_value);
			break;
		default:
			vmx->nested.dirty_vmcs12 = true;
			break;
		}
	}

	return nested_vmx_succeed(vcpu);
}
static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
{
	vmx->nested.current_vmptr = vmptr;
	if (enable_shadow_vmcs) {
		vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
			      SECONDARY_EXEC_SHADOW_VMCS);
		vmcs_write64(VMCS_LINK_POINTER,
			     __pa(vmx->vmcs01.shadow_vmcs));
		vmx->nested.need_vmcs12_sync = true;
	}
	vmx->nested.dirty_vmcs12 = true;
}
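/*
 * Editor's note (illustrative, not from the original source): pointing
 * VMCS_LINK_POINTER at the shadow VMCS lets the CPU satisfy most of L1's
 * VMREAD/VMWRITE instructions directly from vmcs01's shadow without a
 * VM-exit; need_vmcs12_sync records that the shadow contents must be copied
 * back into the cached vmcs12 before they are consumed.
 */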
/* Emulate the VMPTRLD instruction */
static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gpa_t vmptr;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (nested_vmx_get_vmptr(vcpu, &vmptr))
		return 1;

	if (!page_address_valid(vcpu, vmptr))
		return nested_vmx_failValid(vcpu,
			VMXERR_VMPTRLD_INVALID_ADDRESS);

	if (vmptr == vmx->nested.vmxon_ptr)
		return nested_vmx_failValid(vcpu,
			VMXERR_VMPTRLD_VMXON_POINTER);

	/* Forbid normal VMPTRLD if Enlightened version was used */
	if (vmx->nested.hv_evmcs)
		return 1;

	if (vmx->nested.current_vmptr != vmptr) {
		struct kvm_host_map map;
		struct vmcs12 *new_vmcs12;

		if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
			/*
			 * Reads from an unbacked page return all 1s,
			 * which means that the 32 bits located at the
			 * given physical address won't match the required
			 * VMCS12_REVISION identifier.
			 */
			return nested_vmx_failValid(vcpu,
				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
		}

		new_vmcs12 = map.hva;

		if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
		    (new_vmcs12->hdr.shadow_vmcs &&
		     !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
			kvm_vcpu_unmap(vcpu, &map, false);
			return nested_vmx_failValid(vcpu,
				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
		}

		nested_release_vmcs12(vcpu);

		/*
		 * Load VMCS12 from guest memory since it is not already
		 * cached.
		 */
		memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
		kvm_vcpu_unmap(vcpu, &map, false);

		set_current_vmptr(vmx, vmptr);
	}

	return nested_vmx_succeed(vcpu);
}
/* Emulate the VMPTRST instruction */
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
	struct x86_exception e;
	gva_t gva;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
		return 1;

	if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
		return 1;
	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
	if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
					sizeof(gpa_t), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}
	return nested_vmx_succeed(vcpu);
}
/* Emulate the INVEPT instruction */
static int handle_invept(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmx_instruction_info, types;
	unsigned long type;
	gva_t gva;
	struct x86_exception e;
	struct {
		u64 eptp, gpa;
	} operand;

	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_EPT) ||
	    !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;

	if (type >= 32 || !(types & (1 << type)))
		return nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	/* According to the Intel VMX instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
			vmx_instruction_info, false, &gva))
		return 1;
	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}

	switch (type) {
	case VMX_EPT_EXTENT_GLOBAL:
	/*
	 * TODO: track mappings and invalidate
	 * single context requests appropriately
	 */
	case VMX_EPT_EXTENT_CONTEXT:
		kvm_mmu_sync_roots(vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		break;
	default:
		BUG_ON(1);
		break;
	}

	return nested_vmx_succeed(vcpu);
}
/* Emulate the INVVPID instruction */
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmx_instruction_info;
	unsigned long type, types;
	gva_t gva;
	struct x86_exception e;
	struct {
		u64 vpid;
		u64 gla;
	} operand;
	u16 vpid02;

	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_VPID) ||
	    !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.vpid_caps &
			VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;

	if (type >= 32 || !(types & (1 << type)))
		return nested_vmx_failValid(vcpu,
			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	/* according to the intel vmx instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
			vmx_instruction_info, false, &gva))
		return 1;
	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}
	if (operand.vpid >> 16)
		return nested_vmx_failValid(vcpu,
			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	vpid02 = nested_get_vpid02(vcpu);
	switch (type) {
	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
		if (!operand.vpid ||
		    is_noncanonical_address(operand.gla, vcpu))
			return nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
		if (cpu_has_vmx_invvpid_individual_addr()) {
			__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
				vpid02, operand.gla);
		} else
			__vmx_flush_tlb(vcpu, vpid02, false);
		break;
	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
		if (!operand.vpid)
			return nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
		__vmx_flush_tlb(vcpu, vpid02, false);
		break;
	case VMX_VPID_EXTENT_ALL_CONTEXT:
		__vmx_flush_tlb(vcpu, vpid02, false);
		break;
	default:
		WARN_ON_ONCE(1);
		return kvm_skip_emulated_instruction(vcpu);
	}

	return nested_vmx_succeed(vcpu);
}
static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12)
{
	u32 index = kvm_rcx_read(vcpu);
	u64 address;
	bool accessed_dirty;
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	if (!nested_cpu_has_eptp_switching(vmcs12) ||
	    !nested_cpu_has_ept(vmcs12))
		return 1;

	if (index >= VMFUNC_EPTP_ENTRIES)
		return 1;

	if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
				     &address, index * 8, 8))
		return 1;

	accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);

	/*
	 * If the (L2) guest does a vmfunc to the currently
	 * active ept pointer, we don't have to do anything else
	 */
	if (vmcs12->ept_pointer != address) {
		if (!valid_ept_address(vcpu, address))
			return 1;

		kvm_mmu_unload(vcpu);
		mmu->ept_ad = accessed_dirty;
		mmu->mmu_role.base.ad_disabled = !accessed_dirty;
		vmcs12->ept_pointer = address;
		/*
		 * TODO: Check what's the correct approach in case
		 * mmu reload fails. Currently, we just let the next
		 * reload potentially fail
		 */
		kvm_mmu_reload(vcpu);
	}

	return 0;
}
static int handle_vmfunc(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12;
	u32 function = kvm_rax_read(vcpu);

	/*
	 * VMFUNC is only supported for nested guests, but we always enable the
	 * secondary control for simplicity; for non-nested mode, fake that we
	 * didn't by injecting #UD.
	 */
	if (!is_guest_mode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcs12 = get_vmcs12(vcpu);
	if ((vmcs12->vm_function_control & (1 << function)) == 0)
		goto fail;

	switch (function) {
	case 0:
		if (nested_vmx_eptp_switching(vcpu, vmcs12))
			goto fail;
		break;
	default:
		goto fail;
	}
	return kvm_skip_emulated_instruction(vcpu);

fail:
	nested_vmx_vmexit(vcpu, vmx->exit_reason,
			  vmcs_read32(VM_EXIT_INTR_INFO),
			  vmcs_readl(EXIT_QUALIFICATION));
	return 1;
}
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	unsigned long exit_qualification;
	gpa_t bitmap, last_bitmap;
	unsigned int port;
	int size;
	u8 b;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;

	last_bitmap = (gpa_t)-1;
	b = -1;

	while (size > 0) {
		if (port < 0x8000)
			bitmap = vmcs12->io_bitmap_a;
		else if (port < 0x10000)
			bitmap = vmcs12->io_bitmap_b;
		else
			return true;
		bitmap += (port & 0x7fff) / 8;

		if (last_bitmap != bitmap)
			if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
				return true;
		if (b & (1 << (port & 7)))
			return true;

		port++;
		size--;
		last_bitmap = bitmap;
	}

	return false;
}
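/*
 * Editor's note (illustrative worked example, not from the original source):
 * a 1-byte IN from port 0x3f8 consults io_bitmap_a (port < 0x8000) at byte
 * 0x3f8 / 8 = 127, bit 0x3f8 & 7 = 0; a port in the 0x8000-0xffff range would
 * use io_bitmap_b with the same byte/bit arithmetic on the low 15 bits.
 */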
/*
 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
 * disinterest in the current event (read or write a specific MSR) by using an
 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
 */
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12, u32 exit_reason)
{
	u32 msr_index = kvm_rcx_read(vcpu);
	gpa_t bitmap;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return true;

	/*
	 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
	 * for the four combinations of read/write and low/high MSR numbers.
	 * First we need to figure out which of the four to use:
	 */
	bitmap = vmcs12->msr_bitmap;
	if (exit_reason == EXIT_REASON_MSR_WRITE)
		bitmap += 2048;
	if (msr_index >= 0xc0000000) {
		msr_index -= 0xc0000000;
		bitmap += 1024;
	}

	/* Then read the msr_index'th bit from this bitmap: */
	if (msr_index < 1024*8) {
		unsigned char b;
		if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
			return true;
		return 1 & (b >> (msr_index & 7));
	} else
		return true; /* let L1 handle the wrong parameter */
}
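/*
 * Editor's note (illustrative worked example, not from the original source):
 * a WRMSR to MSR_EFER (0xc0000080) selects the "write, high range" quadrant:
 * bitmap += 2048 (write) + 1024 (index >= 0xc0000000), and msr_index becomes
 * 0x80, i.e. byte 0x80 / 8 = 16, bit 0x80 & 7 = 0 of that quadrant.
 */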
/*
 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
 * intercept (via guest_host_mask etc.) the current event.
 */
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	int cr = exit_qualification & 15;
	int reg;
	unsigned long val;

	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		reg = (exit_qualification >> 8) & 15;
		val = kvm_register_readl(vcpu, reg);
		switch (cr) {
		case 0:
			if (vmcs12->cr0_guest_host_mask &
			    (val ^ vmcs12->cr0_read_shadow))
				return true;
			break;
		case 3:
			if ((vmcs12->cr3_target_count >= 1 &&
					vmcs12->cr3_target_value0 == val) ||
				(vmcs12->cr3_target_count >= 2 &&
					vmcs12->cr3_target_value1 == val) ||
				(vmcs12->cr3_target_count >= 3 &&
					vmcs12->cr3_target_value2 == val) ||
				(vmcs12->cr3_target_count >= 4 &&
					vmcs12->cr3_target_value3 == val))
				return false;
			if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
				return true;
			break;
		case 4:
			if (vmcs12->cr4_guest_host_mask &
			    (vmcs12->cr4_read_shadow ^ val))
				return true;
			break;
		case 8:
			if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
				return true;
			break;
		}
		break;
	case 2: /* clts */
		if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
		    (vmcs12->cr0_read_shadow & X86_CR0_TS))
			return true;
		break;
	case 1: /* mov from cr */
		switch (cr) {
		case 3:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR3_STORE_EXITING)
				return true;
			break;
		case 8:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR8_STORE_EXITING)
				return true;
			break;
		}
		break;
	case 3: /* lmsw */
		/*
		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
		 * cr0. Other attempted changes are ignored, with no exit.
		 */
		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
		if (vmcs12->cr0_guest_host_mask & 0xe &
		    (val ^ vmcs12->cr0_read_shadow))
			return true;
		if ((vmcs12->cr0_guest_host_mask & 0x1) &&
		    !(vmcs12->cr0_read_shadow & 0x1) &&
		    (val & 0x1))
			return true;
		break;
	}
	return false;
}
static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12, gpa_t bitmap)
{
	u32 vmx_instruction_info;
	unsigned long field;
	u8 b;

	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return true;

	/* Decode instruction info and find the field to access */
	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));

	/* Out-of-range fields always cause a VM exit from L2 to L1 */
	if (field >> 15)
		return true;

	if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
		return true;

	return 1 & (b >> (field & 7));
}
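/*
 * Editor's note (illustrative, not from the original source): the
 * "field >> 15" test works because a 4 KiB vmread/vmwrite bitmap covers
 * exactly 0x8000 bits, one per field encoding below 0x8000; anything larger
 * cannot be shadowed and must always exit to L1.
 */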
/*
 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
 * should handle it ourselves in L0 (and then continue L2). Only call this
 * when in is_guest_mode (L2).
 */
bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
{
	u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (vmx->nested.nested_run_pending)
		return false;

	if (unlikely(vmx->fail)) {
		pr_info_ratelimited("%s failed vm entry %x\n", __func__,
				    vmcs_read32(VM_INSTRUCTION_ERROR));
		return true;
	}

	/*
	 * The host physical addresses of some pages of guest memory
	 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
	 * Page). The CPU may write to these pages via their host
	 * physical address while L2 is running, bypassing any
	 * address-translation-based dirty tracking (e.g. EPT write
	 * protection).
	 *
	 * Mark them dirty on every exit from L2 to prevent them from
	 * getting out of sync with dirty tracking.
	 */
	nested_mark_vmcs12_pages_dirty(vcpu);

	trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
				vmcs_readl(EXIT_QUALIFICATION),
				vmx->idt_vectoring_info,
				intr_info,
				vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
				KVM_ISA_VMX);

	switch (exit_reason) {
	case EXIT_REASON_EXCEPTION_NMI:
		if (is_nmi(intr_info))
			return false;
		else if (is_page_fault(intr_info))
			return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
		else if (is_debug(intr_info) &&
			 vcpu->guest_debug &
			 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			return false;
		else if (is_breakpoint(intr_info) &&
			 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			return false;
		return vmcs12->exception_bitmap &
				(1u << (intr_info & INTR_INFO_VECTOR_MASK));
	case EXIT_REASON_EXTERNAL_INTERRUPT:
		return false;
	case EXIT_REASON_TRIPLE_FAULT:
		return true;
	case EXIT_REASON_PENDING_INTERRUPT:
		return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
	case EXIT_REASON_NMI_WINDOW:
		return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
	case EXIT_REASON_TASK_SWITCH:
		return true;
	case EXIT_REASON_CPUID:
		return true;
	case EXIT_REASON_HLT:
		return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
	case EXIT_REASON_INVD:
		return true;
	case EXIT_REASON_INVLPG:
		return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
	case EXIT_REASON_RDPMC:
		return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
	case EXIT_REASON_RDRAND:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
	case EXIT_REASON_RDSEED:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
	case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
		return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
	case EXIT_REASON_VMREAD:
		return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
			vmcs12->vmread_bitmap);
	case EXIT_REASON_VMWRITE:
		return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
			vmcs12->vmwrite_bitmap);
	case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
	case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
	case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
		/*
		 * VMX instructions trap unconditionally. This allows L1 to
		 * emulate them for its L2 guest, i.e., allows 3-level nesting!
		 */
		return true;
	case EXIT_REASON_CR_ACCESS:
		return nested_vmx_exit_handled_cr(vcpu, vmcs12);
	case EXIT_REASON_DR_ACCESS:
		return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
	case EXIT_REASON_IO_INSTRUCTION:
		return nested_vmx_exit_handled_io(vcpu, vmcs12);
	case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
	case EXIT_REASON_INVALID_STATE:
		return true;
	case EXIT_REASON_MWAIT_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
	case EXIT_REASON_MONITOR_TRAP_FLAG:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
	case EXIT_REASON_MONITOR_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
	case EXIT_REASON_PAUSE_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
			nested_cpu_has2(vmcs12,
				SECONDARY_EXEC_PAUSE_LOOP_EXITING);
	case EXIT_REASON_MCE_DURING_VMENTRY:
		return false;
	case EXIT_REASON_TPR_BELOW_THRESHOLD:
		return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
	case EXIT_REASON_APIC_ACCESS:
	case EXIT_REASON_APIC_WRITE:
	case EXIT_REASON_EOI_INDUCED:
		/*
		 * The controls for "virtualize APIC accesses," "APIC-
		 * register virtualization," and "virtual-interrupt
		 * delivery" only come from vmcs12.
		 */
		return true;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * L0 always deals with the EPT violation. If nested EPT is
		 * used, and the nested mmu code discovers that the address is
		 * missing in the guest EPT table (EPT12), the EPT violation
		 * will be injected with nested_ept_inject_page_fault()
		 */
		return false;
	case EXIT_REASON_EPT_MISCONFIG:
		/*
		 * L2 never uses directly L1's EPT, but rather L0's own EPT
		 * table (shadow on EPT) or a merged EPT table that L0 built
		 * (EPT on EPT). So any problems with the structure of the
		 * table are L0's fault.
		 */
		return false;
	case EXIT_REASON_INVPCID:
		return
			nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
			nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
	case EXIT_REASON_WBINVD:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
	case EXIT_REASON_XSETBV:
		return true;
	case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
		/*
		 * This should never happen, since it is not possible to
		 * set XSS to a non-zero value---neither in L1 nor in L2.
		 * If it were, XSS would have to be checked against
		 * the XSS exit bitmap in vmcs12.
		 */
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
	case EXIT_REASON_PREEMPTION_TIMER:
		return false;
	case EXIT_REASON_PML_FULL:
		/* We emulate PML support to L1. */
		return false;
	case EXIT_REASON_VMFUNC:
		/* VM functions are emulated through L2->L0 vmexits. */
		return false;
	case EXIT_REASON_ENCLS:
		/* SGX is never exposed to L1 */
		return false;
	default:
		return true;
	}
}
static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_vmx *vmx;
	struct vmcs12 *vmcs12;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = 0,
		.size = sizeof(kvm_state),
		.vmx.vmxon_pa = -1ull,
		.vmx.vmcs_pa = -1ull,
	};

	if (!vcpu)
		return kvm_state.size + 2 * VMCS12_SIZE;

	vmx = to_vmx(vcpu);
	vmcs12 = get_vmcs12(vcpu);

	if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
		kvm_state.flags |= KVM_STATE_NESTED_EVMCS;

	if (nested_vmx_allowed(vcpu) &&
	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
		kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
		kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;

		if (vmx_has_valid_vmcs12(vcpu)) {
			kvm_state.size += VMCS12_SIZE;

			if (is_guest_mode(vcpu) &&
			    nested_cpu_has_shadow_vmcs(vmcs12) &&
			    vmcs12->vmcs_link_pointer != -1ull)
				kvm_state.size += VMCS12_SIZE;
		}

		if (vmx->nested.smm.vmxon)
			kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;

		if (vmx->nested.smm.guest_mode)
			kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;

		if (is_guest_mode(vcpu)) {
			kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

			if (vmx->nested.nested_run_pending)
				kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
		}
	}

	if (user_data_size < kvm_state.size)
		goto out;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!vmx_has_valid_vmcs12(vcpu))
		goto out;

	/*
	 * When running L2, the authoritative vmcs12 state is in the
	 * vmcs02. When running L1, the authoritative vmcs12 state is
	 * in the shadow or enlightened vmcs linked to vmcs01, unless
	 * need_vmcs12_sync is set, in which case, the authoritative
	 * vmcs12 state is in the vmcs12 already.
	 */
	if (is_guest_mode(vcpu)) {
		sync_vmcs12(vcpu, vmcs12);
	} else if (!vmx->nested.need_vmcs12_sync) {
		if (vmx->nested.hv_evmcs)
			copy_enlightened_to_vmcs12(vmx);
		else if (enable_shadow_vmcs)
			copy_shadow_to_vmcs12(vmx);
	}

	/*
	 * Copy over the full allocated size of vmcs12 rather than just the size
	 * of the struct.
	 */
	if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
		return -EFAULT;

	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != -1ull) {
		if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
			return -EFAULT;
	}

out:
	return kvm_state.size;
}
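/*
 * Editor's note (illustrative, not from the original source): the userspace
 * buffer produced above is laid out as the kvm_nested_state header, followed
 * by one VMCS12_SIZE region for the cached vmcs12 and, only when L2 uses VMCS
 * shadowing, a second VMCS12_SIZE region for the shadow vmcs12; a NULL vcpu
 * query returns the worst-case size of that layout.
 */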
/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu)) {
		to_vmx(vcpu)->nested.nested_run_pending = 0;
		nested_vmx_vmexit(vcpu, -1, 0, 0);
	}
	free_nested(vcpu);
}
static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12;
	u32 exit_qual;
	int ret;

	if (kvm_state->format != 0)
		return -EINVAL;

	if (!nested_vmx_allowed(vcpu))
		return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;

	if (kvm_state->vmx.vmxon_pa == -1ull) {
		if (kvm_state->vmx.smm.flags)
			return -EINVAL;

		if (kvm_state->vmx.vmcs_pa != -1ull)
			return -EINVAL;

		vmx_leave_nested(vcpu);
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
		return -EINVAL;

	if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (kvm_state->vmx.smm.flags &
	    ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	/*
	 * SMM temporarily disables VMX, so we cannot be in guest mode,
	 * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
	 * must be zero.
	 */
	if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
		return -EINVAL;

	if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	vmx_leave_nested(vcpu);
	if (kvm_state->vmx.vmxon_pa == -1ull)
		return 0;

	if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
		nested_enable_evmcs(vcpu, NULL);

	vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
	ret = enter_vmx_operation(vcpu);
	if (ret)
		return ret;

	/* Empty 'VMXON' state is permitted */
	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
		return 0;

	if (kvm_state->vmx.vmcs_pa != -1ull) {
		if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
		    !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
			return -EINVAL;

		set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
	} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
		/*
		 * Sync eVMCS upon entry as we may not have
		 * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
		 */
		vmx->nested.need_vmcs12_sync = true;
	} else {
		return -EINVAL;
	}

	if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
		vmx->nested.smm.vmxon = true;
		vmx->nested.vmxon = false;

		if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
			vmx->nested.smm.guest_mode = true;
	}

	vmcs12 = get_vmcs12(vcpu);
	if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
		return -EFAULT;

	if (vmcs12->hdr.revision_id != VMCS12_REVISION)
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return 0;

	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != -1ull) {
		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);

		if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
			return -EINVAL;

		if (copy_from_user(shadow_vmcs12,
				   user_kvm_nested_state->data + VMCS12_SIZE,
				   sizeof(*vmcs12)))
			return -EFAULT;

		if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
		    !shadow_vmcs12->hdr.shadow_vmcs)
			return -EINVAL;
	}

	if (nested_vmx_check_controls(vcpu, vmcs12) ||
	    nested_vmx_check_host_state(vcpu, vmcs12) ||
	    nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
		return -EINVAL;

	vmx->nested.dirty_vmcs12 = true;
	vmx->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	ret = nested_vmx_enter_non_root_mode(vcpu, false);
	if (ret) {
		vmx->nested.nested_run_pending = 0;
		return -EINVAL;
	}

	return 0;
}
void nested_vmx_vcpu_setup(void)
{
	if (enable_shadow_vmcs) {
		/*
		 * At vCPU creation, "VMWRITE to any supported field
		 * in the VMCS" is supported, so use the more
		 * permissive vmx_vmread_bitmap to specify both read
		 * and write permissions for the shadow VMCS.
		 */
		vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap));
	}
}
/*
 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
 * returned for the various VMX controls MSRs when nested VMX is enabled.
 * The same values should also be used to verify that vmcs12 control fields are
 * valid during nested entry from L1 to L2.
 * Each of these control msrs has a low and high 32-bit half: A low bit is on
 * if the corresponding bit in the (32-bit) control field *must* be on, and a
 * bit in the high half is on if the corresponding bit in the control field
 * may be on. See also vmx_control_verify().
 */
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
				bool apicv)
{
	/*
	 * Note that as a general rule, the high half of the MSRs (bits in
	 * the control fields which may be 1) should be initialized by the
	 * intersection of the underlying hardware's MSR (i.e., features which
	 * can be supported) and the list of features we want to expose -
	 * because they are known to be properly supported in our code.
	 * Also, usually, the low half of the MSRs (bits which must be 1) can
	 * be set to 0, meaning that L1 may turn off any of these bits. The
	 * reason is that if one of these bits is necessary, it will appear
	 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
	 * fields of vmcs01 and vmcs02, will turn these bits off - and
	 * nested_vmx_exit_reflected() will not pass related exits to L1.
	 * These rules have exceptions below.
	 */
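	/*
	 * Editor's note (illustrative worked example, not from the original
	 * source): if pinbased_ctls_low = 0x16 and pinbased_ctls_high = 0xff,
	 * L1 must keep bits 1, 2 and 4 set in its pin-based controls and may
	 * set any bit within 0xff; the verification amounts to checking
	 * (ctl & high) == ctl && (ctl & low) == low.
	 */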
5503 /* pin-based controls */
5504 rdmsr(MSR_IA32_VMX_PINBASED_CTLS
,
5505 msrs
->pinbased_ctls_low
,
5506 msrs
->pinbased_ctls_high
);
5507 msrs
->pinbased_ctls_low
|=
5508 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR
;
5509 msrs
->pinbased_ctls_high
&=
5510 PIN_BASED_EXT_INTR_MASK
|
5511 PIN_BASED_NMI_EXITING
|
5512 PIN_BASED_VIRTUAL_NMIS
|
5513 (apicv
? PIN_BASED_POSTED_INTR
: 0);
5514 msrs
->pinbased_ctls_high
|=
5515 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR
|
5516 PIN_BASED_VMX_PREEMPTION_TIMER
;
	/* exit controls */
	rdmsr(MSR_IA32_VMX_EXIT_CTLS,
	      msrs->exit_ctls_low,
	      msrs->exit_ctls_high);
	msrs->exit_ctls_low =
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;

	msrs->exit_ctls_high &=
#ifdef CONFIG_X86_64
		VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
	msrs->exit_ctls_high |=
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

	/* We support free control of debug control saving. */
	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
	/* entry controls */
	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
	      msrs->entry_ctls_low,
	      msrs->entry_ctls_high);
	msrs->entry_ctls_low =
		VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->entry_ctls_high &=
#ifdef CONFIG_X86_64
		VM_ENTRY_IA32E_MODE |
#endif
		VM_ENTRY_LOAD_IA32_PAT;
	msrs->entry_ctls_high |=
		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);

	/* We support free control of debug control loading. */
	msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
	/* cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
	      msrs->procbased_ctls_low,
	      msrs->procbased_ctls_high);
	msrs->procbased_ctls_low =
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->procbased_ctls_high &=
		CPU_BASED_VIRTUAL_INTR_PENDING |
		CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
		CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
		CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
		CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
		CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware. For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
	msrs->procbased_ctls_high |=
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		CPU_BASED_USE_MSR_BITMAPS;

	/* We support free control of CR3 access interception. */
	msrs->procbased_ctls_low &=
		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
	/*
	 * secondary cpu-based controls.  Do not include those that
	 * depend on CPUID bits, they are added later by vmx_cpuid_update.
	 */
	if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      msrs->secondary_ctls_low,
		      msrs->secondary_ctls_high);

	msrs->secondary_ctls_low = 0;
	msrs->secondary_ctls_high &=
		SECONDARY_EXEC_DESC |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_APIC_REGISTER_VIRT |
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
		SECONDARY_EXEC_WBINVD_EXITING;

	/*
	 * We can emulate "VMCS shadowing," even if the hardware
	 * doesn't support it.
	 */
	msrs->secondary_ctls_high |=
		SECONDARY_EXEC_SHADOW_VMCS;
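	/*
	 * (How that emulation works, roughly: without hardware shadowing,
	 * L2's VMREAD/VMWRITE simply exit to L0, and handle_vmread()/
	 * handle_vmwrite() access the shadow vmcs12 designated by vmcs12's
	 * vmcs_link_pointer -- see get_shadow_vmcs12() and
	 * nested_cpu_has_shadow_vmcs() above.)
	 */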
	if (enable_ept) {
		/* nested EPT: emulate EPT also to L1 */
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_EPT;
		msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
			VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
		if (cpu_has_vmx_ept_execute_only())
			msrs->ept_caps |=
				VMX_EPT_EXECUTE_ONLY_BIT;
		msrs->ept_caps &= ept_caps;
		msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
			VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
			VMX_EPT_1GB_PAGE_BIT;
		if (enable_ept_ad_bits) {
			msrs->secondary_ctls_high |=
				SECONDARY_EXEC_ENABLE_PML;
			msrs->ept_caps |= VMX_EPT_AD_BIT;
		}
	}
	if (cpu_has_vmx_vmfunc()) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VMFUNC;
		/*
		 * Advertise EPTP switching unconditionally
		 * since we emulate it
		 */
		if (enable_ept)
			msrs->vmfunc_controls =
				VMX_VMFUNC_EPTP_SWITCHING;
	}
	/*
	 * Old versions of KVM use the single-context version without
	 * checking for support, so declare that it is supported even
	 * though it is treated as global context.  The alternative is
	 * not failing the single-context invvpid, and it is worse.
	 */
	if (enable_vpid) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VPID;
		msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
			VMX_VPID_EXTENT_SUPPORTED_MASK;
	}
	if (enable_unrestricted_guest)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_UNRESTRICTED_GUEST;

	if (flexpriority_enabled)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	/* miscellaneous data */
	rdmsr(MSR_IA32_VMX_MISC,
	      msrs->misc_low,
	      msrs->misc_high);
	msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
	msrs->misc_low |=
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
		VMX_MISC_ACTIVITY_HLT;
	msrs->misc_high = 0;
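	/*
	 * For reference: bits 4:0 of IA32_VMX_MISC encode the preemption
	 * timer rate as a power of two, i.e. the timer counts down once
	 * every
	 *
	 *	1 << VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE
	 *
	 * TSC cycles -- every 32 cycles for the emulated rate of 5.
	 */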
	/*
	 * This MSR reports some information about VMX support. We
	 * should return information about the VMX we emulate for the
	 * guest, and the VMCS structure we give it - not about the
	 * VMX support of the underlying hardware.
	 */
	msrs->basic =
		VMCS12_REVISION |
		VMX_BASIC_TRUE_CTLS |
		((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
		(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);

	if (cpu_has_vmx_basic_inout())
		msrs->basic |= VMX_BASIC_INOUT;
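	/*
	 * For reference, the IA32_VMX_BASIC fields composed above:
	 *
	 *	bits 30:0  - VMCS revision identifier (VMCS12_REVISION)
	 *	bits 44:32 - VMCS region size (VMCS12_SIZE)
	 *	bits 53:50 - VMCS memory type (write-back)
	 *	bit 54     - INS/OUTS exit information (VMX_BASIC_INOUT)
	 *	bit 55     - TRUE controls MSRs supported (VMX_BASIC_TRUE_CTLS)
	 */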
	/*
	 * These MSRs specify bits which the guest must keep fixed on
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
#define VMXON_CR0_ALWAYSON	(X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON	X86_CR4_VMXE
	msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
	msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
	/* These MSRs specify bits which the guest must keep fixed off. */
	rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
	rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
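	/*
	 * A sketch of the resulting constraint (cf. fixed_bits_valid()):
	 * a CR0/CR4 value @val is legal in VMX operation iff
	 *
	 *	(val & fixed0) == fixed0 && (val & ~fixed1) == 0
	 *
	 * i.e. fixed0 bits may not be cleared and bits absent from fixed1
	 * may not be set.
	 */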
	/* highest index: VMX_PREEMPTION_TIMER_VALUE */
	msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
}
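
/*
 * Illustrative helper, not used elsewhere in this file: per the SDM, bit 0
 * of a VMCS field encoding is the access type and bits 9:1 are the field
 * index, which is why "VMCS12_MAX_FIELD_INDEX << 1" above yields the
 * highest-index encoding with the access-type bit clear.
 */
static inline u16 vmcs_field_index_sketch(unsigned long encoding)
{
	return (encoding >> 1) & 0x1ff;		/* bits 9:1: field index */
}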
void nested_vmx_hardware_unsetup(void)
{
	int i;

	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++)
			free_page((unsigned long)vmx_bitmap[i]);
	}
}
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
{
	int i;

	/*
	 * Without EPT it is not possible to restore L1's CR3 and PDPTR on
	 * VMfail, because they are not available in vmcs01.  Just always
	 * use hardware checks.
	 */
	if (!enable_ept)
		nested_early_check = 1;

	if (!cpu_has_vmx_shadow_vmcs())
		enable_shadow_vmcs = 0;
	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++) {
			/*
			 * The vmx_bitmap is not tied to a VM and so should
			 * not be charged to a memcg.
			 */
			vmx_bitmap[i] = (unsigned long *)
				__get_free_page(GFP_KERNEL);
			if (!vmx_bitmap[i]) {
				nested_vmx_hardware_unsetup();
				return -ENOMEM;
			}
		}

		init_vmcs_shadow_fields();
	}
	exit_handlers[EXIT_REASON_VMCLEAR]	= handle_vmclear;
	exit_handlers[EXIT_REASON_VMLAUNCH]	= handle_vmlaunch;
	exit_handlers[EXIT_REASON_VMPTRLD]	= handle_vmptrld;
	exit_handlers[EXIT_REASON_VMPTRST]	= handle_vmptrst;
	exit_handlers[EXIT_REASON_VMREAD]	= handle_vmread;
	exit_handlers[EXIT_REASON_VMRESUME]	= handle_vmresume;
	exit_handlers[EXIT_REASON_VMWRITE]	= handle_vmwrite;
	exit_handlers[EXIT_REASON_VMOFF]	= handle_vmoff;
	exit_handlers[EXIT_REASON_VMON]		= handle_vmon;
	exit_handlers[EXIT_REASON_INVEPT]	= handle_invept;
	exit_handlers[EXIT_REASON_INVVPID]	= handle_invvpid;
	exit_handlers[EXIT_REASON_VMFUNC]	= handle_vmfunc;
	kvm_x86_ops->check_nested_events = vmx_check_nested_events;
	kvm_x86_ops->get_nested_state = vmx_get_nested_state;
	kvm_x86_ops->set_nested_state = vmx_set_nested_state;
	kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
	kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
	kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;

	return 0;
}