]>
Commit | Line | Data |
---|---|---|
8373d25d SC |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef __KVM_X86_VMX_H | |
3 | #define __KVM_X86_VMX_H | |
4 | ||
5 | #include <linux/kvm_host.h> | |
6 | ||
7 | #include <asm/kvm.h> | |
f99e3daf | 8 | #include <asm/intel_pt.h> |
8373d25d SC |
9 | |
10 | #include "capabilities.h" | |
e5d03de5 | 11 | #include "kvm_cache_regs.h" |
8888cdd0 | 12 | #include "posted_intr.h" |
8373d25d | 13 | #include "vmcs.h" |
5a085326 | 14 | #include "vmx_ops.h" |
1dbf5d68 | 15 | #include "cpuid.h" |
8373d25d | 16 | |
cf3646eb | 17 | extern const u32 vmx_msr_index[]; |
cf3646eb | 18 | |
8373d25d SC |
19 | #define MSR_TYPE_R 1 |
20 | #define MSR_TYPE_W 2 | |
21 | #define MSR_TYPE_RW 3 | |
22 | ||
23 | #define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4)) | |
24 | ||
7d73710d | 25 | #ifdef CONFIG_X86_64 |
eb3db1b1 | 26 | #define MAX_NR_USER_RETURN_MSRS 7 |
7d73710d | 27 | #else |
eb3db1b1 | 28 | #define MAX_NR_USER_RETURN_MSRS 4 |
7d73710d JM |
29 | #endif |
30 | ||
ce833b23 | 31 | #define MAX_NR_LOADSTORE_MSRS 8 |
8373d25d SC |
32 | |
/*
 * A list of MSR entries used with the VMCS MSR-load/store mechanism
 * (see the msr_autoload/msr_autostore members of struct vcpu_vmx).
 */
struct vmx_msrs {
	unsigned int nr;	/* number of valid entries in val[] */
	struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
};
37 | ||
/* An MSR whose host value is restored on return to userspace. */
struct vmx_uret_msr {
	unsigned int slot; /* The MSR's slot in kvm_user_return_msrs. */
	u64 data;	/* guest value of the MSR */
	u64 mask;	/* mask applied when writing the guest value */
};
43 | ||
/*
 * Per-segment fields tracked by vcpu_vmx.segment_cache; one bit per
 * field in segment_cache.bitmask marks which entries are valid.
 */
enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};
52 | ||
/* Number of PT address-range filter pairs tracked (addr_a/addr_b). */
#define RTIT_ADDR_RANGE 4

/* One Intel PT (Processor Trace) register context (guest or host). */
struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

/* Per-vCPU Intel PT state: capability info plus host and guest contexts. */
struct pt_desc {
	u64 ctl_bitmask;	/* bits that must not be set in pt ctl */
	u32 addr_range;		/* number of address ranges supported */
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};
8373d25d | 72 | |
a6bdda1f SC |
/*
 * VM-Exit reason field, decoded as individual bits or accessed as the
 * raw 32-bit value via 'full'.  'basic' is the exit reason number;
 * 'failed_vmentry' is set when the exit reflects a failed VM-entry.
 */
union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	reserved26		: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};
95 | ||
8373d25d SC |
/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};
202 | ||
/* VMX-specific per-vCPU state; embeds the generic struct kvm_vcpu. */
struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	u8 fail;
	u8 msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values. If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool guest_state_loaded;

	unsigned long exit_qualification;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;

	struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	int nr_uret_msrs;
	int nr_active_uret_msrs;
	bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;
	u32 msr_ia32_umwait_control;

	u32 secondary_exec_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM 512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	u64 ept_pointer;

	struct pt_desc pt_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS 13
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};
313 | ||
/* Whether all vCPUs' EPT pointers match (see kvm_vmx.ept_pointers_match). */
enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,		/* needs re-checking */
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};
319 | ||
/* VMX-specific per-VM state; embeds the generic struct kvm. */
struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;	/* protects ept_pointers_match */
};
330 | ||
7c97fcb3 | 331 | bool nested_vmx_allowed(struct kvm_vcpu *vcpu); |
5c911bef SC |
332 | void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, |
333 | struct loaded_vmcs *buddy); | |
97b7ead3 SC |
334 | int allocate_vpid(void); |
335 | void free_vpid(int vpid); | |
336 | void vmx_set_constant_host_state(struct vcpu_vmx *vmx); | |
337 | void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu); | |
13b964a2 SC |
338 | void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, |
339 | unsigned long fs_base, unsigned long gs_base); | |
97b7ead3 SC |
340 | int vmx_get_cpl(struct kvm_vcpu *vcpu); |
341 | unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu); | |
342 | void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); | |
343 | u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu); | |
344 | void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask); | |
72f211ec | 345 | int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer); |
97b7ead3 | 346 | void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); |
c2fe3cd4 | 347 | void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); |
97b7ead3 SC |
348 | void set_cr4_guest_host_mask(struct vcpu_vmx *vmx); |
349 | void ept_save_pdptrs(struct kvm_vcpu *vcpu); | |
350 | void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); | |
351 | void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); | |
2a40b900 SC |
352 | u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa, |
353 | int root_level); | |
2ba4493a | 354 | |
97b7ead3 SC |
355 | void update_exception_bitmap(struct kvm_vcpu *vcpu); |
356 | void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); | |
1b660b6b SC |
357 | bool vmx_nmi_blocked(struct kvm_vcpu *vcpu); |
358 | bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu); | |
97b7ead3 SC |
359 | bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu); |
360 | void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); | |
361 | void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu); | |
d85a8034 | 362 | struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr); |
476c9bd8 | 363 | void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu); |
4d259965 | 364 | void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp); |
a128a934 | 365 | int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr); |
43fea4e4 | 366 | void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu); |
97b7ead3 | 367 | |
89b0c9f5 SC |
368 | static inline u8 vmx_get_rvi(void) |
369 | { | |
370 | return vmcs_read16(GUEST_INTR_STATUS) & 0xff; | |
371 | } | |
372 | ||
/*
 * Generate get/set/setbit/clearbit accessors for a shadowed VMCS control
 * field.  The cached copy in loaded_vmcs->controls_shadow avoids a VMWRITE
 * when the new value matches the cache, and lets reads skip VMREAD entirely.
 */
#define BUILD_CONTROLS_SHADOW(lname, uname)				    \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	    \
{									    \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		    \
		vmcs_write32(uname, val);				    \
		vmx->loaded_vmcs->controls_shadow.lname = val;		    \
	}								    \
}									    \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		    \
{									    \
	return vmx->loaded_vmcs->controls_shadow.lname;			    \
}									    \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)   \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	    \
}									    \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	    \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
89b0c9f5 | 398 | |
e5d03de5 SC |
/*
 * Invalidate all cached register state after a VM-Exit, except for the
 * registers listed below, which are lazily read from the VMCS on first
 * use (see e.g. vmx_get_exit_qual()/vmx_get_intr_info()).
 */
static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
				  | (1 << VCPU_EXREG_CR0)
				  | (1 << VCPU_EXREG_CR3)
				  | (1 << VCPU_EXREG_CR4)
				  | (1 << VCPU_EXREG_EXIT_INFO_1)
				  | (1 << VCPU_EXREG_EXIT_INFO_2));
	vcpu->arch.regs_dirty = 0;
}
412 | ||
8373d25d SC |
413 | static inline u32 vmx_vmentry_ctrl(void) |
414 | { | |
f99e3daf | 415 | u32 vmentry_ctrl = vmcs_config.vmentry_ctrl; |
2ef7619d | 416 | if (vmx_pt_mode_is_system()) |
d9293597 YZ |
417 | vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP | |
418 | VM_ENTRY_LOAD_IA32_RTIT_CTL); | |
8373d25d | 419 | /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */ |
f99e3daf | 420 | return vmentry_ctrl & |
8373d25d SC |
421 | ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER); |
422 | } | |
423 | ||
424 | static inline u32 vmx_vmexit_ctrl(void) | |
425 | { | |
f99e3daf | 426 | u32 vmexit_ctrl = vmcs_config.vmexit_ctrl; |
2ef7619d | 427 | if (vmx_pt_mode_is_system()) |
d9293597 YZ |
428 | vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP | |
429 | VM_EXIT_CLEAR_IA32_RTIT_CTL); | |
8373d25d | 430 | /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */ |
d9293597 | 431 | return vmexit_ctrl & |
8373d25d SC |
432 | ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER); |
433 | } | |
434 | ||
435 | u32 vmx_exec_control(struct vcpu_vmx *vmx); | |
c075c3e4 | 436 | u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx); |
8373d25d SC |
437 | |
/* Convert a generic struct kvm pointer to its containing kvm_vmx. */
static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}
442 | ||
/* Convert a generic struct kvm_vcpu pointer to its containing vcpu_vmx. */
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}
447 | ||
5addc235 SC |
448 | static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu) |
449 | { | |
450 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
451 | ||
452 | if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) { | |
453 | kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1); | |
454 | vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | |
455 | } | |
456 | return vmx->exit_qualification; | |
457 | } | |
458 | ||
87915858 SC |
459 | static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu) |
460 | { | |
461 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
462 | ||
463 | if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) { | |
464 | kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2); | |
465 | vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | |
466 | } | |
467 | return vmx->exit_intr_info; | |
468 | } | |
469 | ||
41836839 | 470 | struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags); |
89b0c9f5 SC |
471 | void free_vmcs(struct vmcs *vmcs); |
472 | int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs); | |
473 | void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs); | |
89b0c9f5 SC |
474 | void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs); |
475 | ||
476 | static inline struct vmcs *alloc_vmcs(bool shadow) | |
477 | { | |
41836839 BG |
478 | return alloc_vmcs_cpu(shadow, raw_smp_processor_id(), |
479 | GFP_KERNEL_ACCOUNT); | |
89b0c9f5 SC |
480 | } |
481 | ||
89b0c9f5 SC |
482 | static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx) |
483 | { | |
484 | vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio; | |
485 | vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio); | |
486 | } | |
487 | ||
6e3ba4ab TX |
488 | static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx) |
489 | { | |
490 | return vmx->secondary_exec_control & | |
491 | SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; | |
492 | } | |
493 | ||
a0c13434 PB |
494 | static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu) |
495 | { | |
b96e6506 MG |
496 | if (!enable_ept) |
497 | return true; | |
498 | ||
499 | return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits; | |
a0c13434 PB |
500 | } |
501 | ||
bddd82d1 KS |
502 | static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu) |
503 | { | |
504 | return enable_unrestricted_guest && (!is_guest_mode(vcpu) || | |
505 | (secondary_exec_controls_get(to_vmx(vcpu)) & | |
506 | SECONDARY_EXEC_UNRESTRICTED_GUEST)); | |
507 | } | |
508 | ||
2ba4493a SC |
bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
/*
 * Guest state is trivially valid under unrestricted guest; otherwise do
 * the full segment/state validity checks in __vmx_guest_state_valid().
 */
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}
514 | ||
69090810 PB |
515 | void dump_vmcs(void); |
516 | ||
8373d25d | 517 | #endif /* __KVM_X86_VMX_H */ |