/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "ops.h"
#include "vmcs.h"

extern const u32 vmx_msr_index[];
extern u64 host_efer;

extern u32 get_umwait_control_msr(void);

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

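/*
 * Map an xAPIC MMIO register offset to its x2APIC MSR index: xAPIC registers
 * are spaced 0x10 bytes apart, while the x2APIC MSRs are consecutive starting
 * at APIC_BASE_MSR.  E.g. X2APIC_MSR(APIC_TASKPRI) yields 0x808, the x2APIC
 * TPR MSR.
 */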
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#ifdef CONFIG_X86_64
#define NR_SHARED_MSRS	7
#else
#define NR_SHARED_MSRS	4
#endif

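/*
 * Maximum number of entries in the VM-entry/VM-exit MSR load/store lists;
 * struct vmx_msrs below (used by msr_autoload and msr_autostore in
 * struct vcpu_vmx) is sized by this limit.
 */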
#define NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int nr;
	struct vmx_msr_entry val[NR_LOADSTORE_MSRS];
};

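/*
 * An MSR whose guest value stays loaded while the vCPU runs and whose host
 * value is restored lazily (via the shared/user-return MSR machinery,
 * kvm_set_shared_msr()) instead of being switched atomically on every
 * VM-entry/VM-exit.  mask selects the bits that take the guest value; bits
 * outside the mask keep the host value.
 */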
struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];     /* Posted interrupt requested */
	union {
		struct {
			/* bit 256 - Outstanding Notification */
			u16	on : 1,
			/* bit 257 - Suppress Notification */
				sn : 1,
			/* bit 271:258 - Reserved */
				rsvd_1 : 14;
			/* bit 279:272 - Notification Vector */
			u8 nv;
			/* bit 287:280 - Reserved */
			u8 rsvd_2;
			/* bit 319:288 - Notification Destination */
			u32 ndst;
		};
		u64 control;
	};
	u32 rsvd[6];
} __aligned(64);

#define RTIT_ADDR_RANGE	4

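/*
 * Shadow of the Intel Processor Trace (RTIT) MSR state.  When PT is traced
 * for host and guest separately, pt_desc keeps one pt_ctx per world and the
 * active context is switched around VM-entry/VM-exit.
 */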
struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS.  Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	u8 fail;
	u8 msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values.  If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool guest_state_loaded;

	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;

	struct shared_msr_entry guest_msrs[NR_SHARED_MSRS];
	int nmsrs;
	int save_nmsrs;
	bool guest_msrs_ready;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;
	u32 msr_ia32_umwait_control;

	u32 secondary_exec_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	u32 exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM	512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	u32 host_pkru;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	u64 ept_pointer;

	struct pt_desc pt_desc;
};

enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);

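/*
 * Bit positions within pi_desc->control (i.e. bits 256 and 257 of the
 * posted-interrupt descriptor) used by the pi_*() helpers below.
 */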
#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
{
	return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_set_on(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_SN,
			(unsigned long *)&pi_desc->control);
}

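/* Requesting Virtual Interrupt: the low byte of the guest interrupt status VMCS field. */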
static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

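/*
 * Generate <lname>_controls_{set,get,setbit,clearbit}() accessors for a VMCS
 * controls field.  The value is shadowed in the loaded_vmcs so reads never
 * need a VMREAD and a VMWRITE is issued only when the value actually changes.
 * E.g. BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL) produces
 * pin_controls_set() and friends.
 */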
#define BUILD_CONTROLS_SHADOW(lname, uname)				     \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	     \
{									     \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		     \
		vmcs_write32(uname, val);				     \
		vmx->loaded_vmcs->controls_shadow.lname = val;		     \
	}								     \
}									     \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		     \
{									     \
	return vmx->loaded_vmcs->controls_shadow.lname;			     \
}									     \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)   \
{									     \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	     \
}									     \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									     \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	     \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

static inline u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
	if (pt_mode == PT_MODE_SYSTEM)
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmentry_ctrl &
		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
	if (pt_mode == PT_MODE_SYSTEM)
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);
u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vmx(vcpu)->pi_desc);
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

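/*
 * Allocate a (shadow) VMCS for the current CPU, with the backing page charged
 * to the caller's memory cgroup via GFP_KERNEL_ACCOUNT.
 */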
static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);

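/*
 * Flush guest TLB entries: when EPT is enabled and either guest-physical
 * mappings must be invalidated or VPID is unavailable, invalidate the EPT
 * context for the current root; otherwise a per-VPID invalidation suffices.
 */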
static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
				   bool invalidate_gpa)
{
	if (enable_ept && (invalidate_gpa || !enable_vpid)) {
		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
			return;
		ept_sync_context(construct_eptp(vcpu,
				vcpu->arch.mmu->root_hpa));
	} else {
		vpid_sync_context(vpid);
	}
}

static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
}

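/*
 * Propagate the vCPU's TSC scaling ratio into the active VMCS and remember
 * the value written; callers compare against current_tsc_ratio to avoid
 * redundant updates.
 */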
static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

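/*
 * True when the guest may execute the WAITPKG instructions
 * (UMONITOR/UMWAIT/TPAUSE); used when deciding whether
 * MSR_IA32_UMWAIT_CONTROL needs to be switched between host and guest.
 */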
static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return vmx->secondary_exec_control &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

void dump_vmcs(void);

#endif /* __KVM_X86_VMX_H */