/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

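/*
 * Map an xAPIC MMIO register offset to its x2APIC MSR index: the x2APIC
 * MSRs start at APIC_BASE_MSR and each register is 16 bytes apart in
 * MMIO, e.g. TPR at offset 0x80 becomes MSR 0x808.
 */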
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

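/*
 * A VM-entry/VM-exit MSR load/store list ("MSR autoload"), in the
 * hardware-defined vmx_msr_entry format; KVM caps these lists at
 * MAX_NR_LOADSTORE_MSRS entries.
 */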
struct vmx_msrs {
	unsigned int nr;
	struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE	4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

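/*
 * Software view of the 32-bit VM-exit reason field: the basic exit
 * reason lives in bits 15:0, the modifier flags in the upper bits.
 */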
union vmx_exit_reason {
	struct {
		u32 basic : 16;
		u32 reserved16 : 1;
		u32 reserved17 : 1;
		u32 reserved18 : 1;
		u32 reserved19 : 1;
		u32 reserved20 : 1;
		u32 reserved21 : 1;
		u32 reserved22 : 1;
		u32 reserved23 : 1;
		u32 reserved24 : 1;
		u32 reserved25 : 1;
		u32 bus_lock_detected : 1;
		u32 enclave_mode : 1;
		u32 smi_pending_mtf : 1;
		u32 smi_from_vmx_root : 1;
		u32 reserved30 : 1;
		u32 failed_vmentry : 1;
	};
	u32 full;
};

#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;

	/*
	 * Emulate LBR feature via passthrough LBR registers when the
	 * per-vcpu guest LBR event is scheduled on the current pcpu.
	 *
	 * The records may be inaccurate if the host reclaims the LBR.
	 */
	struct perf_event *event;

	/* True if LBRs are marked as not intercepted in the MSR bitmap */
	bool msr_passthrough;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	u8 fail;
	u8 x2apic_msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values. If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool guest_state_loaded;

	unsigned long exit_qualification;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;

	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=0, thus the SYSCALL MSRs don't need to
	 * be loaded into hardware if those conditions aren't met.
	 */
	struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;
	u32 msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM 512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];
	u64 msr_ia32_mcu_opt_ctrl;
	bool disable_fb_clear;

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS 13
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

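/*
 * The VMX MSR bitmap is a 4K page made up of four 1K regions: read bits
 * for MSRs 0x00000000-0x00001fff at offset 0x000, read bits for MSRs
 * 0xc0000000-0xc0001fff at offset 0x400, and the corresponding write
 * bits at offsets 0x800 and 0xc00.  A set bit means the access is
 * intercepted; MSRs outside the two ranges are always intercepted.
 */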
static inline bool vmx_test_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		return test_bit(msr, msr_bitmap + 0x000 / f);
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		return test_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
	return true;
}

static inline bool vmx_test_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		return test_bit(msr, msr_bitmap + 0x800 / f);
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		return test_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
	return true;
}

static inline void vmx_clear_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		__clear_bit(msr, msr_bitmap + 0x000 / f);
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		__clear_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
}

static inline void vmx_clear_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		__clear_bit(msr, msr_bitmap + 0x800 / f);
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		__clear_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
}

static inline void vmx_set_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		__set_bit(msr, msr_bitmap + 0x000 / f);
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		__set_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
}

static inline void vmx_set_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		__set_bit(msr, msr_bitmap + 0x800 / f);
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		__set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
}

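/*
 * The guest interrupt status field holds RVI (requested virtual
 * interrupt) in its low byte and SVI (servicing virtual interrupt) in
 * its high byte.
 */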
static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

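/*
 * Generate cached accessors for one VMCS controls field.  Writes go
 * through a software shadow in the loaded_vmcs so redundant VMWRITEs
 * are skipped, and reads are served from the shadow without a VMREAD.
 * For example, BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
 * emits exec_controls_get/set/setbit/clearbit(), so a caller can do
 * exec_controls_setbit(vmx, CPU_BASED_MOV_DR_EXITING) to intercept
 * MOV-DR without touching the other execution controls.
 */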
#define BUILD_CONTROLS_SHADOW(lname, uname)				     \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	     \
{									     \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		     \
		vmcs_write32(uname, val);				     \
		vmx->loaded_vmcs->controls_shadow.lname = val;		     \
	}								     \
}									     \
static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs)	     \
{									     \
	return vmcs->controls_shadow.lname;				     \
}									     \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		     \
{									     \
	return __##lname##_controls_get(vmx->loaded_vmcs);		     \
}									     \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)   \
{									     \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	     \
}									     \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									     \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	     \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)

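/*
 * Mark the registers that live in the VMCS (RIP, RSP, RFLAGS, segments,
 * PDPTRs, CR0/CR3/CR4 and the exit info fields) as stale so they are
 * lazily re-read from the VMCS on first use; the remaining GPRs are
 * saved/restored by software and stay valid.
 */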
static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
				  | (1 << VCPU_EXREG_CR0)
				  | (1 << VCPU_EXREG_CR3)
				  | (1 << VCPU_EXREG_CR4)
				  | (1 << VCPU_EXREG_EXIT_INFO_1)
				  | (1 << VCPU_EXREG_EXIT_INFO_2));
	vcpu->arch.regs_dirty = 0;
}

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

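/*
 * The exit qualification and interrupt info fields are read from the
 * VMCS only on first use after an exit and then served from the cached
 * copy in vcpu_vmx.
 */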
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	     SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_H */