Line | Data | |
---|---|---|
1 | /* | |
2 | * Kernel-based Virtual Machine driver for Linux | |
3 | * | |
4 | * This module enables machines with Intel VT-x extensions to run virtual | |
5 | * machines without emulation or binary translation. | |
6 | * | |
7 | * Copyright (C) 2006 Qumranet, Inc. | |
8 | * | |
9 | * Authors: | |
10 | * Avi Kivity <avi@qumranet.com> | |
11 | * Yaniv Kamay <yaniv@qumranet.com> | |
12 | * | |
13 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
14 | * the COPYING file in the top-level directory. | |
15 | * | |
16 | */ | |
17 | ||
18 | #include "irq.h" | |
19 | #include "mmu.h" | |
20 | ||
21 | #include <linux/kvm_host.h> | |
22 | #include <linux/module.h> | |
23 | #include <linux/kernel.h> | |
24 | #include <linux/mm.h> | |
25 | #include <linux/highmem.h> | |
26 | #include <linux/sched.h> | |
27 | #include <linux/moduleparam.h> | |
28 | #include <linux/ftrace_event.h> | |
29 | #include "kvm_cache_regs.h" | |
30 | #include "x86.h" | |
31 | ||
32 | #include <asm/io.h> | |
33 | #include <asm/desc.h> | |
34 | #include <asm/vmx.h> | |
35 | #include <asm/virtext.h> | |
36 | #include <asm/mce.h> | |
37 | ||
38 | #include "trace.h" | |
39 | ||
40 | #define __ex(x) __kvm_handle_fault_on_reboot(x) | |
41 | ||
42 | MODULE_AUTHOR("Qumranet"); | |
43 | MODULE_LICENSE("GPL"); | |
44 | ||
45 | static int __read_mostly bypass_guest_pf = 1; | |
46 | module_param(bypass_guest_pf, bool, S_IRUGO); | |
47 | ||
48 | static int __read_mostly enable_vpid = 1; | |
49 | module_param_named(vpid, enable_vpid, bool, 0444); | |
50 | ||
51 | static int __read_mostly flexpriority_enabled = 1; | |
52 | module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); | |
53 | ||
54 | static int __read_mostly enable_ept = 1; | |
55 | module_param_named(ept, enable_ept, bool, S_IRUGO); | |
56 | ||
57 | static int __read_mostly enable_unrestricted_guest = 1; | |
58 | module_param_named(unrestricted_guest, | |
59 | enable_unrestricted_guest, bool, S_IRUGO); | |
60 | ||
61 | static int __read_mostly emulate_invalid_guest_state = 0; | |
62 | module_param(emulate_invalid_guest_state, bool, S_IRUGO); | |
63 | ||
64 | struct vmcs { | |
65 | u32 revision_id; | |
66 | u32 abort; | |
67 | char data[0]; | |
68 | }; | |
69 | ||
70 | struct vcpu_vmx { | |
71 | struct kvm_vcpu vcpu; | |
72 | struct list_head local_vcpus_link; | |
73 | unsigned long host_rsp; | |
74 | int launched; | |
75 | u8 fail; | |
76 | u32 idt_vectoring_info; | |
77 | struct kvm_msr_entry *guest_msrs; | |
78 | struct kvm_msr_entry *host_msrs; | |
79 | int nmsrs; | |
80 | int save_nmsrs; | |
81 | int msr_offset_efer; | |
82 | #ifdef CONFIG_X86_64 | |
83 | int msr_offset_kernel_gs_base; | |
84 | #endif | |
85 | struct vmcs *vmcs; | |
86 | struct { | |
87 | int loaded; | |
88 | u16 fs_sel, gs_sel, ldt_sel; | |
89 | int gs_ldt_reload_needed; | |
90 | int fs_reload_needed; | |
91 | int guest_efer_loaded; | |
92 | } host_state; | |
93 | struct { | |
94 | int vm86_active; | |
95 | u8 save_iopl; | |
96 | struct kvm_save_segment { | |
97 | u16 selector; | |
98 | unsigned long base; | |
99 | u32 limit; | |
100 | u32 ar; | |
101 | } tr, es, ds, fs, gs; | |
102 | struct { | |
103 | bool pending; | |
104 | u8 vector; | |
105 | unsigned rip; | |
106 | } irq; | |
107 | } rmode; | |
108 | int vpid; | |
109 | bool emulation_required; | |
110 | enum emulation_result invalid_state_emulation_result; | |
111 | ||
112 | /* Support for vnmi-less CPUs */ | |
113 | int soft_vnmi_blocked; | |
114 | ktime_t entry_time; | |
115 | s64 vnmi_blocked_time; | |
116 | u32 exit_reason; | |
117 | }; | |
118 | ||
119 | static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) | |
120 | { | |
121 | return container_of(vcpu, struct vcpu_vmx, vcpu); | |
122 | } | |
123 | ||
124 | static int init_rmode(struct kvm *kvm); | |
125 | static u64 construct_eptp(unsigned long root_hpa); | |
126 | ||
127 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); | |
128 | static DEFINE_PER_CPU(struct vmcs *, current_vmcs); | |
129 | static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu); | |
130 | ||
131 | static unsigned long *vmx_io_bitmap_a; | |
132 | static unsigned long *vmx_io_bitmap_b; | |
133 | static unsigned long *vmx_msr_bitmap_legacy; | |
134 | static unsigned long *vmx_msr_bitmap_longmode; | |
135 | ||
136 | static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); | |
137 | static DEFINE_SPINLOCK(vmx_vpid_lock); | |
138 | ||
139 | static struct vmcs_config { | |
140 | int size; | |
141 | int order; | |
142 | u32 revision_id; | |
143 | u32 pin_based_exec_ctrl; | |
144 | u32 cpu_based_exec_ctrl; | |
145 | u32 cpu_based_2nd_exec_ctrl; | |
146 | u32 vmexit_ctrl; | |
147 | u32 vmentry_ctrl; | |
148 | } vmcs_config; | |
149 | ||
150 | static struct vmx_capability { | |
151 | u32 ept; | |
152 | u32 vpid; | |
153 | } vmx_capability; | |
154 | ||
155 | #define VMX_SEGMENT_FIELD(seg) \ | |
156 | [VCPU_SREG_##seg] = { \ | |
157 | .selector = GUEST_##seg##_SELECTOR, \ | |
158 | .base = GUEST_##seg##_BASE, \ | |
159 | .limit = GUEST_##seg##_LIMIT, \ | |
160 | .ar_bytes = GUEST_##seg##_AR_BYTES, \ | |
161 | } | |
162 | ||
163 | static struct kvm_vmx_segment_field { | |
164 | unsigned selector; | |
165 | unsigned base; | |
166 | unsigned limit; | |
167 | unsigned ar_bytes; | |
168 | } kvm_vmx_segment_fields[] = { | |
169 | VMX_SEGMENT_FIELD(CS), | |
170 | VMX_SEGMENT_FIELD(DS), | |
171 | VMX_SEGMENT_FIELD(ES), | |
172 | VMX_SEGMENT_FIELD(FS), | |
173 | VMX_SEGMENT_FIELD(GS), | |
174 | VMX_SEGMENT_FIELD(SS), | |
175 | VMX_SEGMENT_FIELD(TR), | |
176 | VMX_SEGMENT_FIELD(LDTR), | |
177 | }; | |
178 | ||
179 | static void ept_save_pdptrs(struct kvm_vcpu *vcpu); | |
180 | ||
181 | /* | |
182 | * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it | |
183 | * away by decrementing the array size. | |
184 | */ | |
185 | static const u32 vmx_msr_index[] = { | |
186 | #ifdef CONFIG_X86_64 | |
187 | MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE, | |
188 | #endif | |
189 | MSR_EFER, MSR_K6_STAR, | |
190 | }; | |
191 | #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) | |
192 | ||
193 | static void load_msrs(struct kvm_msr_entry *e, int n) | |
194 | { | |
195 | int i; | |
196 | ||
197 | for (i = 0; i < n; ++i) | |
198 | wrmsrl(e[i].index, e[i].data); | |
199 | } | |
200 | ||
201 | static void save_msrs(struct kvm_msr_entry *e, int n) | |
202 | { | |
203 | int i; | |
204 | ||
205 | for (i = 0; i < n; ++i) | |
206 | rdmsrl(e[i].index, e[i].data); | |
207 | } | |
208 | ||
209 | static inline int is_page_fault(u32 intr_info) | |
210 | { | |
211 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | | |
212 | INTR_INFO_VALID_MASK)) == | |
213 | (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK); | |
214 | } | |
215 | ||
216 | static inline int is_no_device(u32 intr_info) | |
217 | { | |
218 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | | |
219 | INTR_INFO_VALID_MASK)) == | |
220 | (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK); | |
221 | } | |
222 | ||
223 | static inline int is_invalid_opcode(u32 intr_info) | |
224 | { | |
225 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | | |
226 | INTR_INFO_VALID_MASK)) == | |
227 | (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK); | |
228 | } | |
229 | ||
230 | static inline int is_external_interrupt(u32 intr_info) | |
231 | { | |
232 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) | |
233 | == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); | |
234 | } | |
235 | ||
236 | static inline int is_machine_check(u32 intr_info) | |
237 | { | |
238 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | | |
239 | INTR_INFO_VALID_MASK)) == | |
240 | (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); | |
241 | } | |
242 | ||
243 | static inline int cpu_has_vmx_msr_bitmap(void) | |
244 | { | |
245 | return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; | |
246 | } | |
247 | ||
248 | static inline int cpu_has_vmx_tpr_shadow(void) | |
249 | { | |
250 | return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW; | |
251 | } | |
252 | ||
253 | static inline int vm_need_tpr_shadow(struct kvm *kvm) | |
254 | { | |
255 | return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)); | |
256 | } | |
257 | ||
258 | static inline int cpu_has_secondary_exec_ctrls(void) | |
259 | { | |
260 | return vmcs_config.cpu_based_exec_ctrl & | |
261 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; | |
262 | } | |
263 | ||
264 | static inline bool cpu_has_vmx_virtualize_apic_accesses(void) | |
265 | { | |
266 | return vmcs_config.cpu_based_2nd_exec_ctrl & | |
267 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; | |
268 | } | |
269 | ||
270 | static inline bool cpu_has_vmx_flexpriority(void) | |
271 | { | |
272 | return cpu_has_vmx_tpr_shadow() && | |
273 | cpu_has_vmx_virtualize_apic_accesses(); | |
274 | } | |
275 | ||
276 | static inline bool cpu_has_vmx_ept_execute_only(void) | |
277 | { | |
278 | return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT); | |
279 | } | |
280 | ||
281 | static inline bool cpu_has_vmx_eptp_uncacheable(void) | |
282 | { | |
283 | return !!(vmx_capability.ept & VMX_EPTP_UC_BIT); | |
284 | } | |
285 | ||
286 | static inline bool cpu_has_vmx_eptp_writeback(void) | |
287 | { | |
288 | return !!(vmx_capability.ept & VMX_EPTP_WB_BIT); | |
289 | } | |
290 | ||
291 | static inline bool cpu_has_vmx_ept_2m_page(void) | |
292 | { | |
293 | return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT); | |
294 | } | |
295 | ||
296 | static inline int cpu_has_vmx_invept_individual_addr(void) | |
297 | { | |
298 | return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT); | |
299 | } | |
300 | ||
301 | static inline int cpu_has_vmx_invept_context(void) | |
302 | { | |
303 | return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT); | |
304 | } | |
305 | ||
306 | static inline int cpu_has_vmx_invept_global(void) | |
307 | { | |
308 | return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT); | |
309 | } | |
310 | ||
311 | static inline int cpu_has_vmx_ept(void) | |
312 | { | |
313 | return vmcs_config.cpu_based_2nd_exec_ctrl & | |
314 | SECONDARY_EXEC_ENABLE_EPT; | |
315 | } | |
316 | ||
317 | static inline int cpu_has_vmx_unrestricted_guest(void) | |
318 | { | |
319 | return vmcs_config.cpu_based_2nd_exec_ctrl & | |
320 | SECONDARY_EXEC_UNRESTRICTED_GUEST; | |
321 | } | |
322 | ||
323 | static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm) | |
324 | { | |
325 | return flexpriority_enabled && | |
326 | (cpu_has_vmx_virtualize_apic_accesses()) && | |
327 | (irqchip_in_kernel(kvm)); | |
328 | } | |
329 | ||
330 | static inline int cpu_has_vmx_vpid(void) | |
331 | { | |
332 | return vmcs_config.cpu_based_2nd_exec_ctrl & | |
333 | SECONDARY_EXEC_ENABLE_VPID; | |
334 | } | |
335 | ||
336 | static inline int cpu_has_virtual_nmis(void) | |
337 | { | |
338 | return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; | |
339 | } | |
340 | ||
341 | static inline bool report_flexpriority(void) | |
342 | { | |
343 | return flexpriority_enabled; | |
344 | } | |
345 | ||
346 | static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) | |
347 | { | |
348 | int i; | |
349 | ||
350 | for (i = 0; i < vmx->nmsrs; ++i) | |
351 | if (vmx->guest_msrs[i].index == msr) | |
352 | return i; | |
353 | return -1; | |
354 | } | |
355 | ||
356 | static inline void __invvpid(int ext, u16 vpid, gva_t gva) | |
357 | { | |
358 | struct { | |
359 | u64 vpid : 16; | |
360 | u64 rsvd : 48; | |
361 | u64 gva; | |
362 | } operand = { vpid, 0, gva }; | |
363 | ||
364 | asm volatile (__ex(ASM_VMX_INVVPID) | |
365 | /* CF==1 or ZF==1 --> rc = -1 */ | |
366 | "; ja 1f ; ud2 ; 1:" | |
367 | : : "a"(&operand), "c"(ext) : "cc", "memory"); | |
368 | } | |
369 | ||
370 | static inline void __invept(int ext, u64 eptp, gpa_t gpa) | |
371 | { | |
372 | struct { | |
373 | u64 eptp, gpa; | |
374 | } operand = {eptp, gpa}; | |
375 | ||
376 | asm volatile (__ex(ASM_VMX_INVEPT) | |
377 | /* CF==1 or ZF==1 --> rc = -1 */ | |
378 | "; ja 1f ; ud2 ; 1:\n" | |
379 | : : "a" (&operand), "c" (ext) : "cc", "memory"); | |
380 | } | |
381 | ||
382 | static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) | |
383 | { | |
384 | int i; | |
385 | ||
386 | i = __find_msr_index(vmx, msr); | |
387 | if (i >= 0) | |
388 | return &vmx->guest_msrs[i]; | |
389 | return NULL; | |
390 | } | |
391 | ||
392 | static void vmcs_clear(struct vmcs *vmcs) | |
393 | { | |
394 | u64 phys_addr = __pa(vmcs); | |
395 | u8 error; | |
396 | ||
397 | asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0" | |
398 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) | |
399 | : "cc", "memory"); | |
400 | if (error) | |
401 | printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", | |
402 | vmcs, phys_addr); | |
403 | } | |
404 | ||
405 | static void __vcpu_clear(void *arg) | |
406 | { | |
407 | struct vcpu_vmx *vmx = arg; | |
408 | int cpu = raw_smp_processor_id(); | |
409 | ||
410 | if (vmx->vcpu.cpu == cpu) | |
411 | vmcs_clear(vmx->vmcs); | |
412 | if (per_cpu(current_vmcs, cpu) == vmx->vmcs) | |
413 | per_cpu(current_vmcs, cpu) = NULL; | |
414 | rdtscll(vmx->vcpu.arch.host_tsc); | |
415 | list_del(&vmx->local_vcpus_link); | |
416 | vmx->vcpu.cpu = -1; | |
417 | vmx->launched = 0; | |
418 | } | |
419 | ||
420 | static void vcpu_clear(struct vcpu_vmx *vmx) | |
421 | { | |
422 | if (vmx->vcpu.cpu == -1) | |
423 | return; | |
424 | smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1); | |
425 | } | |
426 | ||
427 | static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx) | |
428 | { | |
429 | if (vmx->vpid == 0) | |
430 | return; | |
431 | ||
432 | __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0); | |
433 | } | |
434 | ||
435 | static inline void ept_sync_global(void) | |
436 | { | |
437 | if (cpu_has_vmx_invept_global()) | |
438 | __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0); | |
439 | } | |
440 | ||
441 | static inline void ept_sync_context(u64 eptp) | |
442 | { | |
443 | if (enable_ept) { | |
444 | if (cpu_has_vmx_invept_context()) | |
445 | __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0); | |
446 | else | |
447 | ept_sync_global(); | |
448 | } | |
449 | } | |
450 | ||
451 | static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa) | |
452 | { | |
453 | if (enable_ept) { | |
454 | if (cpu_has_vmx_invept_individual_addr()) | |
455 | __invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR, | |
456 | eptp, gpa); | |
457 | else | |
458 | ept_sync_context(eptp); | |
459 | } | |
460 | } | |
461 | ||
462 | static unsigned long vmcs_readl(unsigned long field) | |
463 | { | |
464 | unsigned long value; | |
465 | ||
466 | asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX) | |
467 | : "=a"(value) : "d"(field) : "cc"); | |
468 | return value; | |
469 | } | |
470 | ||
471 | static u16 vmcs_read16(unsigned long field) | |
472 | { | |
473 | return vmcs_readl(field); | |
474 | } | |
475 | ||
476 | static u32 vmcs_read32(unsigned long field) | |
477 | { | |
478 | return vmcs_readl(field); | |
479 | } | |
480 | ||
481 | static u64 vmcs_read64(unsigned long field) | |
482 | { | |
483 | #ifdef CONFIG_X86_64 | |
484 | return vmcs_readl(field); | |
485 | #else | |
486 | return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32); | |
487 | #endif | |
488 | } | |
489 | ||
490 | static noinline void vmwrite_error(unsigned long field, unsigned long value) | |
491 | { | |
492 | printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", | |
493 | field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); | |
494 | dump_stack(); | |
495 | } | |
496 | ||
497 | static void vmcs_writel(unsigned long field, unsigned long value) | |
498 | { | |
499 | u8 error; | |
500 | ||
501 | asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0" | |
502 | : "=q"(error) : "a"(value), "d"(field) : "cc"); | |
503 | if (unlikely(error)) | |
504 | vmwrite_error(field, value); | |
505 | } | |
506 | ||
507 | static void vmcs_write16(unsigned long field, u16 value) | |
508 | { | |
509 | vmcs_writel(field, value); | |
510 | } | |
511 | ||
512 | static void vmcs_write32(unsigned long field, u32 value) | |
513 | { | |
514 | vmcs_writel(field, value); | |
515 | } | |
516 | ||
517 | static void vmcs_write64(unsigned long field, u64 value) | |
518 | { | |
519 | vmcs_writel(field, value); | |
520 | #ifndef CONFIG_X86_64 | |
521 | asm volatile (""); | |
522 | vmcs_writel(field+1, value >> 32); | |
523 | #endif | |
524 | } | |
525 | ||
526 | static void vmcs_clear_bits(unsigned long field, u32 mask) | |
527 | { | |
528 | vmcs_writel(field, vmcs_readl(field) & ~mask); | |
529 | } | |
530 | ||
531 | static void vmcs_set_bits(unsigned long field, u32 mask) | |
532 | { | |
533 | vmcs_writel(field, vmcs_readl(field) | mask); | |
534 | } | |
535 | ||
536 | static void update_exception_bitmap(struct kvm_vcpu *vcpu) | |
537 | { | |
538 | u32 eb; | |
539 | ||
540 | eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR); | |
541 | if (!vcpu->fpu_active) | |
542 | eb |= 1u << NM_VECTOR; | |
543 | if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { | |
544 | if (vcpu->guest_debug & | |
545 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) | |
546 | eb |= 1u << DB_VECTOR; | |
547 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) | |
548 | eb |= 1u << BP_VECTOR; | |
549 | } | |
550 | if (to_vmx(vcpu)->rmode.vm86_active) | |
551 | eb = ~0; | |
552 | if (enable_ept) | |
553 | eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ | |
554 | vmcs_write32(EXCEPTION_BITMAP, eb); | |
555 | } | |
556 | ||
557 | static void reload_tss(void) | |
558 | { | |
559 | /* | |
560 | * VT restores TR but not its size. Useless. | |
561 | */ | |
562 | struct descriptor_table gdt; | |
563 | struct desc_struct *descs; | |
564 | ||
565 | kvm_get_gdt(&gdt); | |
566 | descs = (void *)gdt.base; | |
567 | descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ | |
568 | load_TR_desc(); | |
569 | } | |
570 | ||
571 | static void load_transition_efer(struct vcpu_vmx *vmx) | |
572 | { | |
573 | int efer_offset = vmx->msr_offset_efer; | |
574 | u64 host_efer, guest_efer, ignore_bits; | |
575 ||
576 | if (efer_offset < 0) | |
577 | return; | |
578 | host_efer = vmx->host_msrs[efer_offset].data; | |
579 | guest_efer = vmx->guest_msrs[efer_offset].data; | |
580 | /* | |
581 | * NX is emulated; LMA and LME handled by hardware; SCE meaningless | |
582 | * outside long mode | |
583 | */ | |
584 | ignore_bits = EFER_NX | EFER_SCE; | |
585 | #ifdef CONFIG_X86_64 | |
586 | ignore_bits |= EFER_LMA | EFER_LME; | |
587 | /* SCE is meaningful only in long mode on Intel */ | |
588 | if (guest_efer & EFER_LMA) | |
589 | ignore_bits &= ~(u64)EFER_SCE; | |
590 | #endif | |
591 | if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits)) | |
592 | return; | |
593 | ||
594 | vmx->host_state.guest_efer_loaded = 1; | |
595 | guest_efer &= ~ignore_bits; | |
596 | guest_efer |= host_efer & ignore_bits; | |
597 | wrmsrl(MSR_EFER, guest_efer); | |
598 | vmx->vcpu.stat.efer_reload++; | |
599 | } | |
600 | ||
601 | static void reload_host_efer(struct vcpu_vmx *vmx) | |
602 | { | |
603 | if (vmx->host_state.guest_efer_loaded) { | |
604 | vmx->host_state.guest_efer_loaded = 0; | |
605 | load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1); | |
606 | } | |
607 | } | |
608 | ||
609 | static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |
610 | { | |
611 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
612 | ||
613 | if (vmx->host_state.loaded) | |
614 | return; | |
615 | ||
616 | vmx->host_state.loaded = 1; | |
617 | /* | |
618 | * Set host fs and gs selectors. Unfortunately, 22.2.3 does not | |
619 | * allow segment selectors with cpl > 0 or ti == 1. | |
620 | */ | |
621 | vmx->host_state.ldt_sel = kvm_read_ldt(); | |
622 | vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; | |
623 | vmx->host_state.fs_sel = kvm_read_fs(); | |
624 | if (!(vmx->host_state.fs_sel & 7)) { | |
625 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); | |
626 | vmx->host_state.fs_reload_needed = 0; | |
627 | } else { | |
628 | vmcs_write16(HOST_FS_SELECTOR, 0); | |
629 | vmx->host_state.fs_reload_needed = 1; | |
630 | } | |
631 | vmx->host_state.gs_sel = kvm_read_gs(); | |
632 | if (!(vmx->host_state.gs_sel & 7)) | |
633 | vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); | |
634 | else { | |
635 | vmcs_write16(HOST_GS_SELECTOR, 0); | |
636 | vmx->host_state.gs_ldt_reload_needed = 1; | |
637 | } | |
638 | ||
639 | #ifdef CONFIG_X86_64 | |
640 | vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); | |
641 | vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); | |
642 | #else | |
643 | vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); | |
644 | vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); | |
645 | #endif | |
646 | ||
647 | #ifdef CONFIG_X86_64 | |
648 | if (is_long_mode(&vmx->vcpu)) | |
649 | save_msrs(vmx->host_msrs + | |
650 | vmx->msr_offset_kernel_gs_base, 1); | |
651 | ||
652 | #endif | |
653 | load_msrs(vmx->guest_msrs, vmx->save_nmsrs); | |
654 | load_transition_efer(vmx); | |
655 | } | |
656 | ||
657 | static void __vmx_load_host_state(struct vcpu_vmx *vmx) | |
658 | { | |
659 | unsigned long flags; | |
660 | ||
661 | if (!vmx->host_state.loaded) | |
662 | return; | |
663 | ||
664 | ++vmx->vcpu.stat.host_state_reload; | |
665 | vmx->host_state.loaded = 0; | |
666 | if (vmx->host_state.fs_reload_needed) | |
667 | kvm_load_fs(vmx->host_state.fs_sel); | |
668 | if (vmx->host_state.gs_ldt_reload_needed) { | |
669 | kvm_load_ldt(vmx->host_state.ldt_sel); | |
670 | /* | |
671 | * If we have to reload gs, we must take care to | |
672 | * preserve our gs base. | |
673 | */ | |
674 | local_irq_save(flags); | |
675 | kvm_load_gs(vmx->host_state.gs_sel); | |
676 | #ifdef CONFIG_X86_64 | |
677 | wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); | |
678 | #endif | |
679 | local_irq_restore(flags); | |
680 | } | |
681 | reload_tss(); | |
682 | save_msrs(vmx->guest_msrs, vmx->save_nmsrs); | |
683 | load_msrs(vmx->host_msrs, vmx->save_nmsrs); | |
684 | reload_host_efer(vmx); | |
685 | } | |
686 | ||
687 | static void vmx_load_host_state(struct vcpu_vmx *vmx) | |
688 | { | |
689 | preempt_disable(); | |
690 | __vmx_load_host_state(vmx); | |
691 | preempt_enable(); | |
692 | } | |
693 | ||
694 | /* | |
695 | * Switches to specified vcpu, until a matching vcpu_put(), but assumes | |
696 | * vcpu mutex is already taken. | |
697 | */ | |
698 | static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |
699 | { | |
700 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
701 | u64 phys_addr = __pa(vmx->vmcs); | |
702 | u64 tsc_this, delta, new_offset; | |
703 | ||
704 | if (vcpu->cpu != cpu) { | |
705 | vcpu_clear(vmx); | |
706 | kvm_migrate_timers(vcpu); | |
707 | vpid_sync_vcpu_all(vmx); | |
708 | local_irq_disable(); | |
709 | list_add(&vmx->local_vcpus_link, | |
710 | &per_cpu(vcpus_on_cpu, cpu)); | |
711 | local_irq_enable(); | |
712 | } | |
713 | ||
714 | if (per_cpu(current_vmcs, cpu) != vmx->vmcs) { | |
715 | u8 error; | |
716 | ||
717 | per_cpu(current_vmcs, cpu) = vmx->vmcs; | |
718 | asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0" | |
719 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) | |
720 | : "cc"); | |
721 | if (error) | |
722 | printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n", | |
723 | vmx->vmcs, phys_addr); | |
724 | } | |
725 | ||
726 | if (vcpu->cpu != cpu) { | |
727 | struct descriptor_table dt; | |
728 | unsigned long sysenter_esp; | |
729 | ||
730 | vcpu->cpu = cpu; | |
731 | /* | |
732 | * Linux uses per-cpu TSS and GDT, so set these when switching | |
733 | * processors. | |
734 | */ | |
735 | vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */ | |
736 | kvm_get_gdt(&dt); | |
737 | vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */ | |
738 | ||
739 | rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); | |
740 | vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ | |
741 | ||
742 | /* | |
743 | * Make sure the guest-observed time stamp counter is monotonic. | |
744 | */ | |
745 | rdtscll(tsc_this); | |
746 | if (tsc_this < vcpu->arch.host_tsc) { | |
747 | delta = vcpu->arch.host_tsc - tsc_this; | |
748 | new_offset = vmcs_read64(TSC_OFFSET) + delta; | |
749 | vmcs_write64(TSC_OFFSET, new_offset); | |
750 | } | |
751 | } | |
752 | } | |
753 | ||
754 | static void vmx_vcpu_put(struct kvm_vcpu *vcpu) | |
755 | { | |
756 | __vmx_load_host_state(to_vmx(vcpu)); | |
757 | } | |
758 | ||
759 | static void vmx_fpu_activate(struct kvm_vcpu *vcpu) | |
760 | { | |
761 | if (vcpu->fpu_active) | |
762 | return; | |
763 | vcpu->fpu_active = 1; | |
764 | vmcs_clear_bits(GUEST_CR0, X86_CR0_TS); | |
765 | if (vcpu->arch.cr0 & X86_CR0_TS) | |
766 | vmcs_set_bits(GUEST_CR0, X86_CR0_TS); | |
767 | update_exception_bitmap(vcpu); | |
768 | } | |
769 | ||
770 | static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) | |
771 | { | |
772 | if (!vcpu->fpu_active) | |
773 | return; | |
774 | vcpu->fpu_active = 0; | |
775 | vmcs_set_bits(GUEST_CR0, X86_CR0_TS); | |
776 | update_exception_bitmap(vcpu); | |
777 | } | |
778 | ||
779 | static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) | |
780 | { | |
781 | return vmcs_readl(GUEST_RFLAGS); | |
782 | } | |
783 | ||
784 | static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | |
785 | { | |
786 | if (to_vmx(vcpu)->rmode.vm86_active) | |
787 | rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; | |
788 | vmcs_writel(GUEST_RFLAGS, rflags); | |
789 | } | |
790 | ||
791 | static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | |
792 | { | |
793 | u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); | |
794 | int ret = 0; | |
795 | ||
796 | if (interruptibility & GUEST_INTR_STATE_STI) | |
797 | ret |= X86_SHADOW_INT_STI; | |
798 | if (interruptibility & GUEST_INTR_STATE_MOV_SS) | |
799 | ret |= X86_SHADOW_INT_MOV_SS; | |
800 | ||
801 | return ret & mask; | |
802 | } | |
803 | ||
804 | static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | |
805 | { | |
806 | u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); | |
807 | u32 interruptibility = interruptibility_old; | |
808 | ||
809 | interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); | |
810 | ||
811 | if (mask & X86_SHADOW_INT_MOV_SS) | |
812 | interruptibility |= GUEST_INTR_STATE_MOV_SS; | |
813 | if (mask & X86_SHADOW_INT_STI) | |
814 | interruptibility |= GUEST_INTR_STATE_STI; | |
815 | ||
816 | if ((interruptibility != interruptibility_old)) | |
817 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); | |
818 | } | |
819 | ||
820 | static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |
821 | { | |
822 | unsigned long rip; | |
823 | ||
824 | rip = kvm_rip_read(vcpu); | |
825 | rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); | |
826 | kvm_rip_write(vcpu, rip); | |
827 | ||
828 | /* skipping an emulated instruction also counts */ | |
829 | vmx_set_interrupt_shadow(vcpu, 0); | |
830 | } | |
831 | ||
832 | static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, | |
833 | bool has_error_code, u32 error_code) | |
834 | { | |
835 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
836 | u32 intr_info = nr | INTR_INFO_VALID_MASK; | |
837 | ||
838 | if (has_error_code) { | |
839 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); | |
840 | intr_info |= INTR_INFO_DELIVER_CODE_MASK; | |
841 | } | |
842 | ||
843 | if (vmx->rmode.vm86_active) { | |
844 | vmx->rmode.irq.pending = true; | |
845 | vmx->rmode.irq.vector = nr; | |
846 | vmx->rmode.irq.rip = kvm_rip_read(vcpu); | |
847 | if (kvm_exception_is_soft(nr)) | |
848 | vmx->rmode.irq.rip += | |
849 | vmx->vcpu.arch.event_exit_inst_len; | |
850 | intr_info |= INTR_TYPE_SOFT_INTR; | |
851 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); | |
852 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); | |
853 | kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); | |
854 | return; | |
855 | } | |
856 | ||
857 | if (kvm_exception_is_soft(nr)) { | |
858 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, | |
859 | vmx->vcpu.arch.event_exit_inst_len); | |
860 | intr_info |= INTR_TYPE_SOFT_EXCEPTION; | |
861 | } else | |
862 | intr_info |= INTR_TYPE_HARD_EXCEPTION; | |
863 | ||
864 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); | |
865 | } | |
866 | ||
867 | /* | |
868 | * Swap MSR entry in host/guest MSR entry array. | |
869 | */ | |
870 | #ifdef CONFIG_X86_64 | |
871 | static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) | |
872 | { | |
873 | struct kvm_msr_entry tmp; | |
874 | ||
875 | tmp = vmx->guest_msrs[to]; | |
876 | vmx->guest_msrs[to] = vmx->guest_msrs[from]; | |
877 | vmx->guest_msrs[from] = tmp; | |
878 | tmp = vmx->host_msrs[to]; | |
879 | vmx->host_msrs[to] = vmx->host_msrs[from]; | |
880 | vmx->host_msrs[from] = tmp; | |
881 | } | |
882 | #endif | |
883 | ||
884 | /* | |
885 | * Set up the vmcs to automatically save and restore system | |
886 | * msrs. Don't touch the 64-bit msrs if the guest is in legacy | |
887 | * mode, as fiddling with msrs is very expensive. | |
888 | */ | |
889 | static void setup_msrs(struct vcpu_vmx *vmx) | |
890 | { | |
891 | int save_nmsrs; | |
892 | unsigned long *msr_bitmap; | |
893 | ||
894 | vmx_load_host_state(vmx); | |
895 | save_nmsrs = 0; | |
896 | #ifdef CONFIG_X86_64 | |
897 | if (is_long_mode(&vmx->vcpu)) { | |
898 | int index; | |
899 | ||
900 | index = __find_msr_index(vmx, MSR_SYSCALL_MASK); | |
901 | if (index >= 0) | |
902 | move_msr_up(vmx, index, save_nmsrs++); | |
903 | index = __find_msr_index(vmx, MSR_LSTAR); | |
904 | if (index >= 0) | |
905 | move_msr_up(vmx, index, save_nmsrs++); | |
906 | index = __find_msr_index(vmx, MSR_CSTAR); | |
907 | if (index >= 0) | |
908 | move_msr_up(vmx, index, save_nmsrs++); | |
909 | index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE); | |
910 | if (index >= 0) | |
911 | move_msr_up(vmx, index, save_nmsrs++); | |
912 | /* | |
913 | * MSR_K6_STAR is only needed on long mode guests, and only | |
914 | * if efer.sce is enabled. | |
915 | */ | |
916 | index = __find_msr_index(vmx, MSR_K6_STAR); | |
917 | if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE)) | |
918 | move_msr_up(vmx, index, save_nmsrs++); | |
919 | } | |
920 | #endif | |
921 | vmx->save_nmsrs = save_nmsrs; | |
922 | ||
923 | #ifdef CONFIG_X86_64 | |
924 | vmx->msr_offset_kernel_gs_base = | |
925 | __find_msr_index(vmx, MSR_KERNEL_GS_BASE); | |
926 | #endif | |
927 | vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER); | |
928 | ||
929 | if (cpu_has_vmx_msr_bitmap()) { | |
930 | if (is_long_mode(&vmx->vcpu)) | |
931 | msr_bitmap = vmx_msr_bitmap_longmode; | |
932 | else | |
933 | msr_bitmap = vmx_msr_bitmap_legacy; | |
934 | ||
935 | vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); | |
936 | } | |
937 | } | |
938 | ||
939 | /* | |
940 | * reads and returns guest's timestamp counter "register" | |
941 | * guest_tsc = host_tsc + tsc_offset -- 21.3 | |
942 | */ | |
943 | static u64 guest_read_tsc(void) | |
944 | { | |
945 | u64 host_tsc, tsc_offset; | |
946 | ||
947 | rdtscll(host_tsc); | |
948 | tsc_offset = vmcs_read64(TSC_OFFSET); | |
949 | return host_tsc + tsc_offset; | |
950 | } | |
951 | ||
952 | /* | |
953 | * writes 'guest_tsc' into guest's timestamp counter "register" | |
954 | * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc | |
955 | */ | |
956 | static void guest_write_tsc(u64 guest_tsc, u64 host_tsc) | |
957 | { | |
958 | vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc); | |
959 | } | |
960 | ||
961 | /* | |
962 | * Reads an msr value (of 'msr_index') into 'pdata'. | |
963 | * Returns 0 on success, non-0 otherwise. | |
964 | * Assumes vcpu_load() was already called. | |
965 | */ | |
966 | static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |
967 | { | |
968 | u64 data; | |
969 | struct kvm_msr_entry *msr; | |
970 | ||
971 | if (!pdata) { | |
972 | printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); | |
973 | return -EINVAL; | |
974 | } | |
975 | ||
976 | switch (msr_index) { | |
977 | #ifdef CONFIG_X86_64 | |
978 | case MSR_FS_BASE: | |
979 | data = vmcs_readl(GUEST_FS_BASE); | |
980 | break; | |
981 | case MSR_GS_BASE: | |
982 | data = vmcs_readl(GUEST_GS_BASE); | |
983 | break; | |
984 | case MSR_EFER: | |
985 | return kvm_get_msr_common(vcpu, msr_index, pdata); | |
986 | #endif | |
987 | case MSR_IA32_TSC: | |
988 | data = guest_read_tsc(); | |
989 | break; | |
990 | case MSR_IA32_SYSENTER_CS: | |
991 | data = vmcs_read32(GUEST_SYSENTER_CS); | |
992 | break; | |
993 | case MSR_IA32_SYSENTER_EIP: | |
994 | data = vmcs_readl(GUEST_SYSENTER_EIP); | |
995 | break; | |
996 | case MSR_IA32_SYSENTER_ESP: | |
997 | data = vmcs_readl(GUEST_SYSENTER_ESP); | |
998 | break; | |
999 | default: | |
1000 | vmx_load_host_state(to_vmx(vcpu)); | |
1001 | msr = find_msr_entry(to_vmx(vcpu), msr_index); | |
1002 | if (msr) { | |
1003 | data = msr->data; | |
1004 | break; | |
1005 | } | |
1006 | return kvm_get_msr_common(vcpu, msr_index, pdata); | |
1007 | } | |
1008 | ||
1009 | *pdata = data; | |
1010 | return 0; | |
1011 | } | |
1012 | ||
1013 | /* | |
1014 | * Writes msr value into the appropriate "register". | |
1015 | * Returns 0 on success, non-0 otherwise. | |
1016 | * Assumes vcpu_load() was already called. | |
1017 | */ | |
1018 | static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |
1019 | { | |
1020 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
1021 | struct kvm_msr_entry *msr; | |
1022 | u64 host_tsc; | |
1023 | int ret = 0; | |
1024 | ||
1025 | switch (msr_index) { | |
1026 | case MSR_EFER: | |
1027 | vmx_load_host_state(vmx); | |
1028 | ret = kvm_set_msr_common(vcpu, msr_index, data); | |
1029 | break; | |
1030 | #ifdef CONFIG_X86_64 | |
1031 | case MSR_FS_BASE: | |
1032 | vmcs_writel(GUEST_FS_BASE, data); | |
1033 | break; | |
1034 | case MSR_GS_BASE: | |
1035 | vmcs_writel(GUEST_GS_BASE, data); | |
1036 | break; | |
1037 | #endif | |
1038 | case MSR_IA32_SYSENTER_CS: | |
1039 | vmcs_write32(GUEST_SYSENTER_CS, data); | |
1040 | break; | |
1041 | case MSR_IA32_SYSENTER_EIP: | |
1042 | vmcs_writel(GUEST_SYSENTER_EIP, data); | |
1043 | break; | |
1044 | case MSR_IA32_SYSENTER_ESP: | |
1045 | vmcs_writel(GUEST_SYSENTER_ESP, data); | |
1046 | break; | |
1047 | case MSR_IA32_TSC: | |
1048 | rdtscll(host_tsc); | |
1049 | guest_write_tsc(data, host_tsc); | |
1050 | break; | |
1051 | case MSR_IA32_CR_PAT: | |
1052 | if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { | |
1053 | vmcs_write64(GUEST_IA32_PAT, data); | |
1054 | vcpu->arch.pat = data; | |
1055 | break; | |
1056 | } | |
1057 | /* Otherwise falls through to kvm_set_msr_common */ | |
1058 | default: | |
1059 | vmx_load_host_state(vmx); | |
1060 | msr = find_msr_entry(vmx, msr_index); | |
1061 | if (msr) { | |
1062 | msr->data = data; | |
1063 | break; | |
1064 | } | |
1065 | ret = kvm_set_msr_common(vcpu, msr_index, data); | |
1066 | } | |
1067 | ||
1068 | return ret; | |
1069 | } | |
1070 | ||
1071 | static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) | |
1072 | { | |
1073 | __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); | |
1074 | switch (reg) { | |
1075 | case VCPU_REGS_RSP: | |
1076 | vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); | |
1077 | break; | |
1078 | case VCPU_REGS_RIP: | |
1079 | vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); | |
1080 | break; | |
1081 | case VCPU_EXREG_PDPTR: | |
1082 | if (enable_ept) | |
1083 | ept_save_pdptrs(vcpu); | |
1084 | break; | |
1085 | default: | |
1086 | break; | |
1087 | } | |
1088 | } | |
1089 | ||
1090 | static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) | |
1091 | { | |
1092 | int old_debug = vcpu->guest_debug; | |
1093 | unsigned long flags; | |
1094 | ||
1095 | vcpu->guest_debug = dbg->control; | |
1096 | if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) | |
1097 | vcpu->guest_debug = 0; | |
1098 | ||
1099 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) | |
1100 | vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]); | |
1101 | else | |
1102 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); | |
1103 | ||
1104 | flags = vmcs_readl(GUEST_RFLAGS); | |
1105 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | |
1106 | flags |= X86_EFLAGS_TF | X86_EFLAGS_RF; | |
1107 | else if (old_debug & KVM_GUESTDBG_SINGLESTEP) | |
1108 | flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); | |
1109 | vmcs_writel(GUEST_RFLAGS, flags); | |
1110 | ||
1111 | update_exception_bitmap(vcpu); | |
1112 | ||
1113 | return 0; | |
1114 | } | |
1115 | ||
1116 | static __init int cpu_has_kvm_support(void) | |
1117 | { | |
1118 | return cpu_has_vmx(); | |
1119 | } | |
1120 | ||
1121 | static __init int vmx_disabled_by_bios(void) | |
1122 | { | |
1123 | u64 msr; | |
1124 | ||
1125 | rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); | |
1126 | return (msr & (FEATURE_CONTROL_LOCKED | | |
1127 | FEATURE_CONTROL_VMXON_ENABLED)) | |
1128 | == FEATURE_CONTROL_LOCKED; | |
1129 | /* locked but not enabled */ | |
1130 | } | |
1131 | ||
1132 | static void hardware_enable(void *garbage) | |
1133 | { | |
1134 | int cpu = raw_smp_processor_id(); | |
1135 | u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); | |
1136 | u64 old; | |
1137 | ||
1138 | INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); | |
1139 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); | |
1140 | if ((old & (FEATURE_CONTROL_LOCKED | | |
1141 | FEATURE_CONTROL_VMXON_ENABLED)) | |
1142 | != (FEATURE_CONTROL_LOCKED | | |
1143 | FEATURE_CONTROL_VMXON_ENABLED)) | |
1144 | /* enable and lock */ | |
1145 | wrmsrl(MSR_IA32_FEATURE_CONTROL, old | | |
1146 | FEATURE_CONTROL_LOCKED | | |
1147 | FEATURE_CONTROL_VMXON_ENABLED); | |
1148 | write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ | |
1149 | asm volatile (ASM_VMX_VMXON_RAX | |
1150 | : : "a"(&phys_addr), "m"(phys_addr) | |
1151 | : "memory", "cc"); | |
1152 | } | |
1153 | ||
1154 | static void vmclear_local_vcpus(void) | |
1155 | { | |
1156 | int cpu = raw_smp_processor_id(); | |
1157 | struct vcpu_vmx *vmx, *n; | |
1158 | ||
1159 | list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu), | |
1160 | local_vcpus_link) | |
1161 | __vcpu_clear(vmx); | |
1162 | } | |
1163 | ||
1164 | ||
1165 | /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() | |
1166 | * tricks. | |
1167 | */ | |
1168 | static void kvm_cpu_vmxoff(void) | |
1169 | { | |
1170 | asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); | |
1171 | write_cr4(read_cr4() & ~X86_CR4_VMXE); | |
1172 | } | |
1173 | ||
1174 | static void hardware_disable(void *garbage) | |
1175 | { | |
1176 | vmclear_local_vcpus(); | |
1177 | kvm_cpu_vmxoff(); | |
1178 | } | |
1179 | ||
1180 | static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, | |
1181 | u32 msr, u32 *result) | |
1182 | { | |
1183 | u32 vmx_msr_low, vmx_msr_high; | |
1184 | u32 ctl = ctl_min | ctl_opt; | |
1185 | ||
1186 | rdmsr(msr, vmx_msr_low, vmx_msr_high); | |
1187 | ||
1188 | ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ | |
1189 | ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ | |
1190 | ||
1191 | /* Ensure minimum (required) set of control bits are supported. */ | |
1192 | if (ctl_min & ~ctl) | |
1193 | return -EIO; | |
1194 | ||
1195 | *result = ctl; | |
1196 | return 0; | |
1197 | } | |
1198 | ||
1199 | static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) | |
1200 | { | |
1201 | u32 vmx_msr_low, vmx_msr_high; | |
1202 | u32 min, opt, min2, opt2; | |
1203 | u32 _pin_based_exec_control = 0; | |
1204 | u32 _cpu_based_exec_control = 0; | |
1205 | u32 _cpu_based_2nd_exec_control = 0; | |
1206 | u32 _vmexit_control = 0; | |
1207 | u32 _vmentry_control = 0; | |
1208 | ||
1209 | min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; | |
1210 | opt = PIN_BASED_VIRTUAL_NMIS; | |
1211 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, | |
1212 | &_pin_based_exec_control) < 0) | |
1213 | return -EIO; | |
1214 | ||
1215 | min = CPU_BASED_HLT_EXITING | | |
1216 | #ifdef CONFIG_X86_64 | |
1217 | CPU_BASED_CR8_LOAD_EXITING | | |
1218 | CPU_BASED_CR8_STORE_EXITING | | |
1219 | #endif | |
1220 | CPU_BASED_CR3_LOAD_EXITING | | |
1221 | CPU_BASED_CR3_STORE_EXITING | | |
1222 | CPU_BASED_USE_IO_BITMAPS | | |
1223 | CPU_BASED_MOV_DR_EXITING | | |
1224 | CPU_BASED_USE_TSC_OFFSETING | | |
1225 | CPU_BASED_INVLPG_EXITING; | |
1226 | opt = CPU_BASED_TPR_SHADOW | | |
1227 | CPU_BASED_USE_MSR_BITMAPS | | |
1228 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; | |
1229 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, | |
1230 | &_cpu_based_exec_control) < 0) | |
1231 | return -EIO; | |
1232 | #ifdef CONFIG_X86_64 | |
1233 | if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) | |
1234 | _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & | |
1235 | ~CPU_BASED_CR8_STORE_EXITING; | |
1236 | #endif | |
1237 | if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { | |
1238 | min2 = 0; | |
1239 | opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | | |
1240 | SECONDARY_EXEC_WBINVD_EXITING | | |
1241 | SECONDARY_EXEC_ENABLE_VPID | | |
1242 | SECONDARY_EXEC_ENABLE_EPT | | |
1243 | SECONDARY_EXEC_UNRESTRICTED_GUEST; | |
1244 | if (adjust_vmx_controls(min2, opt2, | |
1245 | MSR_IA32_VMX_PROCBASED_CTLS2, | |
1246 | &_cpu_based_2nd_exec_control) < 0) | |
1247 | return -EIO; | |
1248 | } | |
1249 | #ifndef CONFIG_X86_64 | |
1250 | if (!(_cpu_based_2nd_exec_control & | |
1251 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) | |
1252 | _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; | |
1253 | #endif | |
1254 | if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { | |
1255 | /* CR3 accesses and invlpg don't need to cause VM exits when EPT | |
1256 | is enabled */ | |
1257 | min &= ~(CPU_BASED_CR3_LOAD_EXITING | | |
1258 | CPU_BASED_CR3_STORE_EXITING | | |
1259 | CPU_BASED_INVLPG_EXITING); | |
1260 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, | |
1261 | &_cpu_based_exec_control) < 0) | |
1262 | return -EIO; | |
1263 | rdmsr(MSR_IA32_VMX_EPT_VPID_CAP, | |
1264 | vmx_capability.ept, vmx_capability.vpid); | |
1265 | } | |
1266 | ||
1267 | min = 0; | |
1268 | #ifdef CONFIG_X86_64 | |
1269 | min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; | |
1270 | #endif | |
1271 | opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT; | |
1272 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, | |
1273 | &_vmexit_control) < 0) | |
1274 | return -EIO; | |
1275 | ||
1276 | min = 0; | |
1277 | opt = VM_ENTRY_LOAD_IA32_PAT; | |
1278 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, | |
1279 | &_vmentry_control) < 0) | |
1280 | return -EIO; | |
1281 | ||
1282 | rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); | |
1283 | ||
1284 | /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ | |
1285 | if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) | |
1286 | return -EIO; | |
1287 | ||
1288 | #ifdef CONFIG_X86_64 | |
1289 | /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ | |
1290 | if (vmx_msr_high & (1u<<16)) | |
1291 | return -EIO; | |
1292 | #endif | |
1293 | ||
1294 | /* Require Write-Back (WB) memory type for VMCS accesses. */ | |
1295 | if (((vmx_msr_high >> 18) & 15) != 6) | |
1296 | return -EIO; | |
1297 | ||
1298 | vmcs_conf->size = vmx_msr_high & 0x1fff; | |
1299 | vmcs_conf->order = get_order(vmcs_conf->size); | |
1300 | vmcs_conf->revision_id = vmx_msr_low; | |
1301 | ||
1302 | vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; | |
1303 | vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; | |
1304 | vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; | |
1305 | vmcs_conf->vmexit_ctrl = _vmexit_control; | |
1306 | vmcs_conf->vmentry_ctrl = _vmentry_control; | |
1307 | ||
1308 | return 0; | |
1309 | } | |
1310 | ||
1311 | static struct vmcs *alloc_vmcs_cpu(int cpu) | |
1312 | { | |
1313 | int node = cpu_to_node(cpu); | |
1314 | struct page *pages; | |
1315 | struct vmcs *vmcs; | |
1316 | ||
1317 | pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order); | |
1318 | if (!pages) | |
1319 | return NULL; | |
1320 | vmcs = page_address(pages); | |
1321 | memset(vmcs, 0, vmcs_config.size); | |
1322 | vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */ | |
1323 | return vmcs; | |
1324 | } | |
1325 | ||
1326 | static struct vmcs *alloc_vmcs(void) | |
1327 | { | |
1328 | return alloc_vmcs_cpu(raw_smp_processor_id()); | |
1329 | } | |
1330 | ||
1331 | static void free_vmcs(struct vmcs *vmcs) | |
1332 | { | |
1333 | free_pages((unsigned long)vmcs, vmcs_config.order); | |
1334 | } | |
1335 | ||
1336 | static void free_kvm_area(void) | |
1337 | { | |
1338 | int cpu; | |
1339 | ||
1340 | for_each_online_cpu(cpu) | |
1341 | free_vmcs(per_cpu(vmxarea, cpu)); | |
1342 | } | |
1343 | ||
1344 | static __init int alloc_kvm_area(void) | |
1345 | { | |
1346 | int cpu; | |
1347 | ||
1348 | for_each_online_cpu(cpu) { | |
1349 | struct vmcs *vmcs; | |
1350 | ||
1351 | vmcs = alloc_vmcs_cpu(cpu); | |
1352 | if (!vmcs) { | |
1353 | free_kvm_area(); | |
1354 | return -ENOMEM; | |
1355 | } | |
1356 | ||
1357 | per_cpu(vmxarea, cpu) = vmcs; | |
1358 | } | |
1359 | return 0; | |
1360 | } | |
1361 | ||
1362 | static __init int hardware_setup(void) | |
1363 | { | |
1364 | if (setup_vmcs_config(&vmcs_config) < 0) | |
1365 | return -EIO; | |
1366 | ||
1367 | if (boot_cpu_has(X86_FEATURE_NX)) | |
1368 | kvm_enable_efer_bits(EFER_NX); | |
1369 | ||
1370 | if (!cpu_has_vmx_vpid()) | |
1371 | enable_vpid = 0; | |
1372 | ||
1373 | if (!cpu_has_vmx_ept()) { | |
1374 | enable_ept = 0; | |
1375 | enable_unrestricted_guest = 0; | |
1376 | } | |
1377 | ||
1378 | if (!cpu_has_vmx_unrestricted_guest()) | |
1379 | enable_unrestricted_guest = 0; | |
1380 | ||
1381 | if (!cpu_has_vmx_flexpriority()) | |
1382 | flexpriority_enabled = 0; | |
1383 | ||
1384 | if (!cpu_has_vmx_tpr_shadow()) | |
1385 | kvm_x86_ops->update_cr8_intercept = NULL; | |
1386 | ||
1387 | if (enable_ept && !cpu_has_vmx_ept_2m_page()) | |
1388 | kvm_disable_largepages(); | |
1389 | ||
1390 | return alloc_kvm_area(); | |
1391 | } | |
1392 | ||
1393 | static __exit void hardware_unsetup(void) | |
1394 | { | |
1395 | free_kvm_area(); | |
1396 | } | |
1397 | ||
1398 | static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save) | |
1399 | { | |
1400 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | |
1401 | ||
1402 | if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) { | |
1403 | vmcs_write16(sf->selector, save->selector); | |
1404 | vmcs_writel(sf->base, save->base); | |
1405 | vmcs_write32(sf->limit, save->limit); | |
1406 | vmcs_write32(sf->ar_bytes, save->ar); | |
1407 | } else { | |
1408 | u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK) | |
1409 | << AR_DPL_SHIFT; | |
1410 | vmcs_write32(sf->ar_bytes, 0x93 | dpl); | |
1411 | } | |
1412 | } | |
1413 | ||
1414 | static void enter_pmode(struct kvm_vcpu *vcpu) | |
1415 | { | |
1416 | unsigned long flags; | |
1417 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
1418 | ||
1419 | vmx->emulation_required = 1; | |
1420 | vmx->rmode.vm86_active = 0; | |
1421 | ||
1422 | vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base); | |
1423 | vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit); | |
1424 | vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar); | |
1425 | ||
1426 | flags = vmcs_readl(GUEST_RFLAGS); | |
1427 | flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM); | |
1428 | flags |= (vmx->rmode.save_iopl << IOPL_SHIFT); | |
1429 | vmcs_writel(GUEST_RFLAGS, flags); | |
1430 | ||
1431 | vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | | |
1432 | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); | |
1433 | ||
1434 | update_exception_bitmap(vcpu); | |
1435 | ||
1436 | if (emulate_invalid_guest_state) | |
1437 | return; | |
1438 | ||
1439 | fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es); | |
1440 | fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds); | |
1441 | fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs); | |
1442 | fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs); | |
1443 | ||
1444 | vmcs_write16(GUEST_SS_SELECTOR, 0); | |
1445 | vmcs_write32(GUEST_SS_AR_BYTES, 0x93); | |
1446 | ||
1447 | vmcs_write16(GUEST_CS_SELECTOR, | |
1448 | vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK); | |
1449 | vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); | |
1450 | } | |
1451 | ||
1452 | static gva_t rmode_tss_base(struct kvm *kvm) | |
1453 | { | |
1454 | if (!kvm->arch.tss_addr) { | |
1455 | gfn_t base_gfn = kvm->memslots[0].base_gfn + | |
1456 | kvm->memslots[0].npages - 3; | |
1457 | return base_gfn << PAGE_SHIFT; | |
1458 | } | |
1459 | return kvm->arch.tss_addr; | |
1460 | } | |
1461 | ||
1462 | static void fix_rmode_seg(int seg, struct kvm_save_segment *save) | |
1463 | { | |
1464 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | |
1465 | ||
1466 | save->selector = vmcs_read16(sf->selector); | |
1467 | save->base = vmcs_readl(sf->base); | |
1468 | save->limit = vmcs_read32(sf->limit); | |
1469 | save->ar = vmcs_read32(sf->ar_bytes); | |
1470 | vmcs_write16(sf->selector, save->base >> 4); | |
1471 | vmcs_write32(sf->base, save->base & 0xfffff); | |
1472 | vmcs_write32(sf->limit, 0xffff); | |
1473 | vmcs_write32(sf->ar_bytes, 0xf3); | |
1474 | } | |
1475 | ||
1476 | static void enter_rmode(struct kvm_vcpu *vcpu) | |
1477 | { | |
1478 | unsigned long flags; | |
1479 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
1480 | ||
1481 | if (enable_unrestricted_guest) | |
1482 | return; | |
1483 | ||
1484 | vmx->emulation_required = 1; | |
1485 | vmx->rmode.vm86_active = 1; | |
1486 | ||
1487 | vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE); | |
1488 | vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm)); | |
1489 | ||
1490 | vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT); | |
1491 | vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); | |
1492 | ||
1493 | vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES); | |
1494 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); | |
1495 | ||
1496 | flags = vmcs_readl(GUEST_RFLAGS); | |
1497 | vmx->rmode.save_iopl | |
1498 | = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; | |
1499 | ||
1500 | flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; | |
1501 | ||
1502 | vmcs_writel(GUEST_RFLAGS, flags); | |
1503 | vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); | |
1504 | update_exception_bitmap(vcpu); | |
1505 | ||
1506 | if (emulate_invalid_guest_state) | |
1507 | goto continue_rmode; | |
1508 | ||
1509 | vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4); | |
1510 | vmcs_write32(GUEST_SS_LIMIT, 0xffff); | |
1511 | vmcs_write32(GUEST_SS_AR_BYTES, 0xf3); | |
1512 | ||
1513 | vmcs_write32(GUEST_CS_AR_BYTES, 0xf3); | |
1514 | vmcs_write32(GUEST_CS_LIMIT, 0xffff); | |
1515 | if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000) | |
1516 | vmcs_writel(GUEST_CS_BASE, 0xf0000); | |
1517 | vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4); | |
1518 | ||
1519 | fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es); | |
1520 | fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds); | |
1521 | fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs); | |
1522 | fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs); | |
1523 | ||
1524 | continue_rmode: | |
1525 | kvm_mmu_reset_context(vcpu); | |
1526 | init_rmode(vcpu->kvm); | |
1527 | } | |
1528 | ||
1529 | static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) | |
1530 | { | |
1531 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
1532 | struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); | |
1533 | ||
1534 | vcpu->arch.shadow_efer = efer; | |
1535 | if (!msr) | |
1536 | return; | |
1537 | if (efer & EFER_LMA) { | |
1538 | vmcs_write32(VM_ENTRY_CONTROLS, | |
1539 | vmcs_read32(VM_ENTRY_CONTROLS) | | |
1540 | VM_ENTRY_IA32E_MODE); | |
1541 | msr->data = efer; | |
1542 | } else { | |
1543 | vmcs_write32(VM_ENTRY_CONTROLS, | |
1544 | vmcs_read32(VM_ENTRY_CONTROLS) & | |
1545 | ~VM_ENTRY_IA32E_MODE); | |
1546 | ||
1547 | msr->data = efer & ~EFER_LME; | |
1548 | } | |
1549 | setup_msrs(vmx); | |
1550 | } | |
1551 | ||
1552 | #ifdef CONFIG_X86_64 | |
1553 | ||
1554 | static void enter_lmode(struct kvm_vcpu *vcpu) | |
1555 | { | |
1556 | u32 guest_tr_ar; | |
1557 | ||
1558 | guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); | |
1559 | if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { | |
1560 | printk(KERN_DEBUG "%s: tss fixup for long mode. \n", | |
1561 | __func__); | |
1562 | vmcs_write32(GUEST_TR_AR_BYTES, | |
1563 | (guest_tr_ar & ~AR_TYPE_MASK) | |
1564 | | AR_TYPE_BUSY_64_TSS); | |
1565 | } | |
1566 | vcpu->arch.shadow_efer |= EFER_LMA; | |
1567 | vmx_set_efer(vcpu, vcpu->arch.shadow_efer); | |
1568 | } | |
1569 | ||
1570 | static void exit_lmode(struct kvm_vcpu *vcpu) | |
1571 | { | |
1572 | vcpu->arch.shadow_efer &= ~EFER_LMA; | |
1573 | ||
1574 | vmcs_write32(VM_ENTRY_CONTROLS, | |
1575 | vmcs_read32(VM_ENTRY_CONTROLS) | |
1576 | & ~VM_ENTRY_IA32E_MODE); | |
1577 | } | |
1578 | ||
1579 | #endif | |
1580 | ||
1581 | static void vmx_flush_tlb(struct kvm_vcpu *vcpu) | |
1582 | { | |
1583 | vpid_sync_vcpu_all(to_vmx(vcpu)); | |
1584 | if (enable_ept) | |
1585 | ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); | |
1586 | } | |
1587 | ||
1588 | static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) | |
1589 | { | |
1590 | vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK; | |
1591 | vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK; | |
1592 | } | |
1593 | ||
1594 | static void ept_load_pdptrs(struct kvm_vcpu *vcpu) | |
1595 | { | |
1596 | if (!test_bit(VCPU_EXREG_PDPTR, | |
1597 | (unsigned long *)&vcpu->arch.regs_dirty)) | |
1598 | return; | |
1599 | ||
1600 | if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { | |
1601 | vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]); | |
1602 | vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]); | |
1603 | vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]); | |
1604 | vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]); | |
1605 | } | |
1606 | } | |
1607 | ||
1608 | static void ept_save_pdptrs(struct kvm_vcpu *vcpu) | |
1609 | { | |
1610 | if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { | |
1611 | vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0); | |
1612 | vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1); | |
1613 | vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2); | |
1614 | vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3); | |
1615 | } | |
1616 | ||
1617 | __set_bit(VCPU_EXREG_PDPTR, | |
1618 | (unsigned long *)&vcpu->arch.regs_avail); | |
1619 | __set_bit(VCPU_EXREG_PDPTR, | |
1620 | (unsigned long *)&vcpu->arch.regs_dirty); | |
1621 | } | |
1622 | ||
1623 | static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); | |
1624 | ||
1625 | static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, | |
1626 | unsigned long cr0, | |
1627 | struct kvm_vcpu *vcpu) | |
1628 | { | |
1629 | if (!(cr0 & X86_CR0_PG)) { | |
1630 | /* From paging/starting to nonpaging */ | |
1631 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, | |
1632 | vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | | |
1633 | (CPU_BASED_CR3_LOAD_EXITING | | |
1634 | CPU_BASED_CR3_STORE_EXITING)); | |
1635 | vcpu->arch.cr0 = cr0; | |
1636 | vmx_set_cr4(vcpu, vcpu->arch.cr4); | |
1637 | *hw_cr0 &= ~X86_CR0_WP; | |
1638 | } else if (!is_paging(vcpu)) { | |
1639 | /* From nonpaging to paging */ | |
1640 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, | |
1641 | vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & | |
1642 | ~(CPU_BASED_CR3_LOAD_EXITING | | |
1643 | CPU_BASED_CR3_STORE_EXITING)); | |
1644 | vcpu->arch.cr0 = cr0; | |
1645 | vmx_set_cr4(vcpu, vcpu->arch.cr4); | |
1646 | if (!(vcpu->arch.cr0 & X86_CR0_WP)) | |
1647 | *hw_cr0 &= ~X86_CR0_WP; | |
1648 | } | |
1649 | } | |
1650 | ||
1651 | static void ept_update_paging_mode_cr4(unsigned long *hw_cr4, | |
1652 | struct kvm_vcpu *vcpu) | |
1653 | { | |
1654 | if (!is_paging(vcpu)) { | |
1655 | *hw_cr4 &= ~X86_CR4_PAE; | |
1656 | *hw_cr4 |= X86_CR4_PSE; | |
1657 | } else if (!(vcpu->arch.cr4 & X86_CR4_PAE)) | |
1658 | *hw_cr4 &= ~X86_CR4_PAE; | |
1659 | } | |
1660 | ||
1661 | static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |
1662 | { | |
1663 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
1664 | unsigned long hw_cr0; | |
1665 | ||
1666 | if (enable_unrestricted_guest) | |
1667 | hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST) | |
1668 | | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; | |
1669 | else | |
1670 | hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON; | |
1671 | ||
1672 | vmx_fpu_deactivate(vcpu); | |
1673 | ||
1674 | if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) | |
1675 | enter_pmode(vcpu); | |
1676 | ||
1677 | if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) | |
1678 | enter_rmode(vcpu); | |
1679 | ||
1680 | #ifdef CONFIG_X86_64 | |
1681 | if (vcpu->arch.shadow_efer & EFER_LME) { | |
1682 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) | |
1683 | enter_lmode(vcpu); | |
1684 | if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) | |
1685 | exit_lmode(vcpu); | |
1686 | } | |
1687 | #endif | |
1688 | ||
1689 | if (enable_ept) | |
1690 | ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); | |
1691 | ||
1692 | vmcs_writel(CR0_READ_SHADOW, cr0); | |
1693 | vmcs_writel(GUEST_CR0, hw_cr0); | |
1694 | vcpu->arch.cr0 = cr0; | |
1695 | ||
1696 | if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE)) | |
1697 | vmx_fpu_activate(vcpu); | |
1698 | } | |
1699 | ||
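| /* | |
| * Build an EPT pointer from the root table's host-physical address; | |
| * the low bits encode the memory type and the guest address width. | |
| */ | |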
1700 | static u64 construct_eptp(unsigned long root_hpa) | |
1701 | { | |
1702 | u64 eptp; | |
1703 | ||
1704 | /* TODO write the value reading from MSR */ | |
1705 | eptp = VMX_EPT_DEFAULT_MT | | |
1706 | VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT; | |
1707 | eptp |= (root_hpa & PAGE_MASK); | |
1708 | ||
1709 | return eptp; | |
1710 | } | |
1711 | ||
1712 | static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | |
1713 | { | |
1714 | unsigned long guest_cr3; | |
1715 | u64 eptp; | |
1716 | ||
1717 | guest_cr3 = cr3; | |
1718 | if (enable_ept) { | |
1719 | eptp = construct_eptp(cr3); | |
1720 | vmcs_write64(EPT_POINTER, eptp); | |
1721 | guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 : | |
1722 | vcpu->kvm->arch.ept_identity_map_addr; | |
1723 | } | |
1724 | ||
1725 | vmx_flush_tlb(vcpu); | |
1726 | vmcs_writel(GUEST_CR3, guest_cr3); | |
1727 | if (vcpu->arch.cr0 & X86_CR0_PE) | |
1728 | vmx_fpu_deactivate(vcpu); | |
1729 | } | |
1730 | ||
1731 | static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |
1732 | { | |
1733 | unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ? | |
1734 | KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); | |
1735 | ||
1736 | vcpu->arch.cr4 = cr4; | |
1737 | if (enable_ept) | |
1738 | ept_update_paging_mode_cr4(&hw_cr4, vcpu); | |
1739 | ||
1740 | vmcs_writel(CR4_READ_SHADOW, cr4); | |
1741 | vmcs_writel(GUEST_CR4, hw_cr4); | |
1742 | } | |
1743 | ||
1744 | static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) | |
1745 | { | |
1746 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | |
1747 | ||
1748 | return vmcs_readl(sf->base); | |
1749 | } | |
1750 | ||
1751 | static void vmx_get_segment(struct kvm_vcpu *vcpu, | |
1752 | struct kvm_segment *var, int seg) | |
1753 | { | |
1754 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | |
1755 | u32 ar; | |
1756 | ||
1757 | var->base = vmcs_readl(sf->base); | |
1758 | var->limit = vmcs_read32(sf->limit); | |
1759 | var->selector = vmcs_read16(sf->selector); | |
1760 | ar = vmcs_read32(sf->ar_bytes); | |
1761 | if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state) | |
1762 | ar = 0; | |
1763 | var->type = ar & 15; | |
1764 | var->s = (ar >> 4) & 1; | |
1765 | var->dpl = (ar >> 5) & 3; | |
1766 | var->present = (ar >> 7) & 1; | |
1767 | var->avl = (ar >> 12) & 1; | |
1768 | var->l = (ar >> 13) & 1; | |
1769 | var->db = (ar >> 14) & 1; | |
1770 | var->g = (ar >> 15) & 1; | |
1771 | var->unusable = (ar >> 16) & 1; | |
1772 | } | |
1773 | ||
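| /* | |
| * Current privilege level: 0 in real mode, 3 in virtual-8086 mode, | |
| * otherwise the RPL of the guest CS selector. | |
| */ | |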
1774 | static int vmx_get_cpl(struct kvm_vcpu *vcpu) | |
1775 | { | |
1776 | if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */ | |
1777 | return 0; | |
1778 | ||
1779 | if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */ | |
1780 | return 3; | |
1781 | ||
1782 | return vmcs_read16(GUEST_CS_SELECTOR) & 3; | |
1783 | } | |
1784 | ||
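| /* | |
| * Pack a struct kvm_segment into the VMX access-rights byte layout | |
| * (type, S, DPL, P, AVL, L, D/B, G, unusable). | |
| */ | |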
1785 | static u32 vmx_segment_access_rights(struct kvm_segment *var) | |
1786 | { | |
1787 | u32 ar; | |
1788 | ||
1789 | if (var->unusable) | |
1790 | ar = 1 << 16; | |
1791 | else { | |
1792 | ar = var->type & 15; | |
1793 | ar |= (var->s & 1) << 4; | |
1794 | ar |= (var->dpl & 3) << 5; | |
1795 | ar |= (var->present & 1) << 7; | |
1796 | ar |= (var->avl & 1) << 12; | |
1797 | ar |= (var->l & 1) << 13; | |
1798 | ar |= (var->db & 1) << 14; | |
1799 | ar |= (var->g & 1) << 15; | |
1800 | } | |
1801 | if (ar == 0) /* a 0 value means unusable */ | |
1802 | ar = AR_UNUSABLE_MASK; | |
1803 | ||
1804 | return ar; | |
1805 | } | |
1806 | ||
1807 | static void vmx_set_segment(struct kvm_vcpu *vcpu, | |
1808 | struct kvm_segment *var, int seg) | |
1809 | { | |
1810 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
1811 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | |
1812 | u32 ar; | |
1813 | ||
1814 | if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) { | |
1815 | vmx->rmode.tr.selector = var->selector; | |
1816 | vmx->rmode.tr.base = var->base; | |
1817 | vmx->rmode.tr.limit = var->limit; | |
1818 | vmx->rmode.tr.ar = vmx_segment_access_rights(var); | |
1819 | return; | |
1820 | } | |
1821 | vmcs_writel(sf->base, var->base); | |
1822 | vmcs_write32(sf->limit, var->limit); | |
1823 | vmcs_write16(sf->selector, var->selector); | |
1824 | if (vmx->rmode.vm86_active && var->s) { | |
1825 | /* | |
1826 | * Hack real-mode segments into vm86 compatibility. | |
1827 | */ | |
1828 | if (var->base == 0xffff0000 && var->selector == 0xf000) | |
1829 | vmcs_writel(sf->base, 0xf0000); | |
1830 | ar = 0xf3; | |
1831 | } else | |
1832 | ar = vmx_segment_access_rights(var); | |
1833 | ||
1834 | /* | |
1835 | * Fix the "Accessed" bit in AR field of segment registers for older | |
1836 | * qemu binaries. | |
1837 | * IA32 arch specifies that at the time of processor reset the | |
1838 | * "Accessed" bit in the AR field of segment registers is 1. And qemu | |
1839 | * is setting it to 0 in the userland code. This causes invalid guest | |
1840 | * state vmexit when "unrestricted guest" mode is turned on. | |
1841 | * Fix for this setup issue in cpu_reset is being pushed in the qemu | |
1842 | * tree. Newer qemu binaries with that qemu fix would not need this | |
1843 | * kvm hack. | |
1844 | */ | |
1845 | if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) | |
1846 | ar |= 0x1; /* Accessed */ | |
1847 | ||
1848 | vmcs_write32(sf->ar_bytes, ar); | |
1849 | } | |
1850 | ||
1851 | static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) | |
1852 | { | |
1853 | u32 ar = vmcs_read32(GUEST_CS_AR_BYTES); | |
1854 | ||
1855 | *db = (ar >> 14) & 1; | |
1856 | *l = (ar >> 13) & 1; | |
1857 | } | |
1858 | ||
1859 | static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | |
1860 | { | |
1861 | dt->limit = vmcs_read32(GUEST_IDTR_LIMIT); | |
1862 | dt->base = vmcs_readl(GUEST_IDTR_BASE); | |
1863 | } | |
1864 | ||
1865 | static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | |
1866 | { | |
1867 | vmcs_write32(GUEST_IDTR_LIMIT, dt->limit); | |
1868 | vmcs_writel(GUEST_IDTR_BASE, dt->base); | |
1869 | } | |
1870 | ||
1871 | static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | |
1872 | { | |
1873 | dt->limit = vmcs_read32(GUEST_GDTR_LIMIT); | |
1874 | dt->base = vmcs_readl(GUEST_GDTR_BASE); | |
1875 | } | |
1876 | ||
1877 | static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | |
1878 | { | |
1879 | vmcs_write32(GUEST_GDTR_LIMIT, dt->limit); | |
1880 | vmcs_writel(GUEST_GDTR_BASE, dt->base); | |
1881 | } | |
1882 | ||
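| /* | |
| * A segment can be represented faithfully in vm86 mode only if it | |
| * looks like a real-mode segment: base == selector << 4, a 64K limit | |
| * and 0xf3 access rights. | |
| */ | |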
1883 | static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) | |
1884 | { | |
1885 | struct kvm_segment var; | |
1886 | u32 ar; | |
1887 | ||
1888 | vmx_get_segment(vcpu, &var, seg); | |
1889 | ar = vmx_segment_access_rights(&var); | |
1890 | ||
1891 | if (var.base != (var.selector << 4)) | |
1892 | return false; | |
1893 | if (var.limit != 0xffff) | |
1894 | return false; | |
1895 | if (ar != 0xf3) | |
1896 | return false; | |
1897 | ||
1898 | return true; | |
1899 | } | |
1900 | ||
1901 | static bool code_segment_valid(struct kvm_vcpu *vcpu) | |
1902 | { | |
1903 | struct kvm_segment cs; | |
1904 | unsigned int cs_rpl; | |
1905 | ||
1906 | vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); | |
1907 | cs_rpl = cs.selector & SELECTOR_RPL_MASK; | |
1908 | ||
1909 | if (cs.unusable) | |
1910 | return false; | |
1911 | if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK)) | |
1912 | return false; | |
1913 | if (!cs.s) | |
1914 | return false; | |
1915 | if (cs.type & AR_TYPE_WRITEABLE_MASK) { | |
1916 | if (cs.dpl > cs_rpl) | |
1917 | return false; | |
1918 | } else { | |
1919 | if (cs.dpl != cs_rpl) | |
1920 | return false; | |
1921 | } | |
1922 | if (!cs.present) | |
1923 | return false; | |
1924 | ||
1925 | /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ | |
1926 | return true; | |
1927 | } | |
1928 | ||
1929 | static bool stack_segment_valid(struct kvm_vcpu *vcpu) | |
1930 | { | |
1931 | struct kvm_segment ss; | |
1932 | unsigned int ss_rpl; | |
1933 | ||
1934 | vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); | |
1935 | ss_rpl = ss.selector & SELECTOR_RPL_MASK; | |
1936 | ||
1937 | if (ss.unusable) | |
1938 | return true; | |
1939 | if (ss.type != 3 && ss.type != 7) | |
1940 | return false; | |
1941 | if (!ss.s) | |
1942 | return false; | |
1943 | if (ss.dpl != ss_rpl) /* DPL != RPL */ | |
1944 | return false; | |
1945 | if (!ss.present) | |
1946 | return false; | |
1947 | ||
1948 | return true; | |
1949 | } | |
1950 | ||
1951 | static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) | |
1952 | { | |
1953 | struct kvm_segment var; | |
1954 | unsigned int rpl; | |
1955 | ||
1956 | vmx_get_segment(vcpu, &var, seg); | |
1957 | rpl = var.selector & SELECTOR_RPL_MASK; | |
1958 | ||
1959 | if (var.unusable) | |
1960 | return true; | |
1961 | if (!var.s) | |
1962 | return false; | |
1963 | if (!var.present) | |
1964 | return false; | |
1965 | if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) { | |
1966 | if (var.dpl < rpl) /* DPL < RPL */ | |
1967 | return false; | |
1968 | } | |
1969 | ||
1970 | /* TODO: Add other members to kvm_segment_field to allow checking for other access | |
1971 | * rights flags | |
1972 | */ | |
1973 | return true; | |
1974 | } | |
1975 | ||
1976 | static bool tr_valid(struct kvm_vcpu *vcpu) | |
1977 | { | |
1978 | struct kvm_segment tr; | |
1979 | ||
1980 | vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); | |
1981 | ||
1982 | if (tr.unusable) | |
1983 | return false; | |
1984 | if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */ | |
1985 | return false; | |
1986 | if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ | |
1987 | return false; | |
1988 | if (!tr.present) | |
1989 | return false; | |
1990 | ||
1991 | return true; | |
1992 | } | |
1993 | ||
1994 | static bool ldtr_valid(struct kvm_vcpu *vcpu) | |
1995 | { | |
1996 | struct kvm_segment ldtr; | |
1997 | ||
1998 | vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); | |
1999 | ||
2000 | if (ldtr.unusable) | |
2001 | return true; | |
2002 | if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */ | |
2003 | return false; | |
2004 | if (ldtr.type != 2) | |
2005 | return false; | |
2006 | if (!ldtr.present) | |
2007 | return false; | |
2008 | ||
2009 | return true; | |
2010 | } | |
2011 | ||
2012 | static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) | |
2013 | { | |
2014 | struct kvm_segment cs, ss; | |
2015 | ||
2016 | vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); | |
2017 | vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); | |
2018 | ||
2019 | return ((cs.selector & SELECTOR_RPL_MASK) == | |
2020 | (ss.selector & SELECTOR_RPL_MASK)); | |
2021 | } | |
2022 | ||
2023 | /* | |
2024 | * Check if guest state is valid. Returns true if valid, false if | |
2025 | * not. | |
2026 | * We assume that registers are always usable | |
2027 | */ | |
2028 | static bool guest_state_valid(struct kvm_vcpu *vcpu) | |
2029 | { | |
2030 | /* real mode guest state checks */ | |
2031 | if (!(vcpu->arch.cr0 & X86_CR0_PE)) { | |
2032 | if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) | |
2033 | return false; | |
2034 | if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) | |
2035 | return false; | |
2036 | if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) | |
2037 | return false; | |
2038 | if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) | |
2039 | return false; | |
2040 | if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) | |
2041 | return false; | |
2042 | if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) | |
2043 | return false; | |
2044 | } else { | |
2045 | /* protected mode guest state checks */ | |
2046 | if (!cs_ss_rpl_check(vcpu)) | |
2047 | return false; | |
2048 | if (!code_segment_valid(vcpu)) | |
2049 | return false; | |
2050 | if (!stack_segment_valid(vcpu)) | |
2051 | return false; | |
2052 | if (!data_segment_valid(vcpu, VCPU_SREG_DS)) | |
2053 | return false; | |
2054 | if (!data_segment_valid(vcpu, VCPU_SREG_ES)) | |
2055 | return false; | |
2056 | if (!data_segment_valid(vcpu, VCPU_SREG_FS)) | |
2057 | return false; | |
2058 | if (!data_segment_valid(vcpu, VCPU_SREG_GS)) | |
2059 | return false; | |
2060 | if (!tr_valid(vcpu)) | |
2061 | return false; | |
2062 | if (!ldtr_valid(vcpu)) | |
2063 | return false; | |
2064 | } | |
2065 | /* TODO: | |
2066 | * - Add checks on RIP | |
2067 | * - Add checks on RFLAGS | |
2068 | */ | |
2069 | ||
2070 | return true; | |
2071 | } | |
2072 | ||
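| /* | |
| * Lay out the dummy real-mode TSS in the reserved guest pages: clear | |
| * them, point the I/O bitmap base past the interrupt redirection map, | |
| * and write the terminating 0xff byte at the end of the bitmap. | |
| */ | |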
2073 | static int init_rmode_tss(struct kvm *kvm) | |
2074 | { | |
2075 | gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT; | |
2076 | u16 data = 0; | |
2077 | int ret = 0; | |
2078 | int r; | |
2079 | ||
2080 | r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); | |
2081 | if (r < 0) | |
2082 | goto out; | |
2083 | data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; | |
2084 | r = kvm_write_guest_page(kvm, fn++, &data, | |
2085 | TSS_IOPB_BASE_OFFSET, sizeof(u16)); | |
2086 | if (r < 0) | |
2087 | goto out; | |
2088 | r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); | |
2089 | if (r < 0) | |
2090 | goto out; | |
2091 | r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); | |
2092 | if (r < 0) | |
2093 | goto out; | |
2094 | data = ~0; | |
2095 | r = kvm_write_guest_page(kvm, fn, &data, | |
2096 | RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, | |
2097 | sizeof(u8)); | |
2098 | if (r < 0) | |
2099 | goto out; | |
2100 | ||
2101 | ret = 1; | |
2102 | out: | |
2103 | return ret; | |
2104 | } | |
2105 | ||
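| /* | |
| * Fill the EPT identity-map page with present, writable 4MB PSE | |
| * entries so an unpaged guest runs on an identity-mapped address | |
| * space. | |
| */ | |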
2106 | static int init_rmode_identity_map(struct kvm *kvm) | |
2107 | { | |
2108 | int i, r, ret; | |
2109 | pfn_t identity_map_pfn; | |
2110 | u32 tmp; | |
2111 | ||
2112 | if (!enable_ept) | |
2113 | return 1; | |
2114 | if (unlikely(!kvm->arch.ept_identity_pagetable)) { | |
2115 | printk(KERN_ERR "EPT: identity-mapping pagetable " | |
2116 | "haven't been allocated!\n"); | |
2117 | return 0; | |
2118 | } | |
2119 | if (likely(kvm->arch.ept_identity_pagetable_done)) | |
2120 | return 1; | |
2121 | ret = 0; | |
2122 | identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT; | |
2123 | r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); | |
2124 | if (r < 0) | |
2125 | goto out; | |
2126 | /* Set up identity-mapping pagetable for EPT in real mode */ | |
2127 | for (i = 0; i < PT32_ENT_PER_PAGE; i++) { | |
2128 | tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | | |
2129 | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); | |
2130 | r = kvm_write_guest_page(kvm, identity_map_pfn, | |
2131 | &tmp, i * sizeof(tmp), sizeof(tmp)); | |
2132 | if (r < 0) | |
2133 | goto out; | |
2134 | } | |
2135 | kvm->arch.ept_identity_pagetable_done = true; | |
2136 | ret = 1; | |
2137 | out: | |
2138 | return ret; | |
2139 | } | |
2140 | ||
2141 | static void seg_setup(int seg) | |
2142 | { | |
2143 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | |
2144 | unsigned int ar; | |
2145 | ||
2146 | vmcs_write16(sf->selector, 0); | |
2147 | vmcs_writel(sf->base, 0); | |
2148 | vmcs_write32(sf->limit, 0xffff); | |
2149 | if (enable_unrestricted_guest) { | |
2150 | ar = 0x93; | |
2151 | if (seg == VCPU_SREG_CS) | |
2152 | ar |= 0x08; /* code segment */ | |
2153 | } else | |
2154 | ar = 0xf3; | |
2155 | ||
2156 | vmcs_write32(sf->ar_bytes, ar); | |
2157 | } | |
2158 | ||
2159 | static int alloc_apic_access_page(struct kvm *kvm) | |
2160 | { | |
2161 | struct kvm_userspace_memory_region kvm_userspace_mem; | |
2162 | int r = 0; | |
2163 | ||
2164 | down_write(&kvm->slots_lock); | |
2165 | if (kvm->arch.apic_access_page) | |
2166 | goto out; | |
2167 | kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; | |
2168 | kvm_userspace_mem.flags = 0; | |
2169 | kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL; | |
2170 | kvm_userspace_mem.memory_size = PAGE_SIZE; | |
2171 | r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); | |
2172 | if (r) | |
2173 | goto out; | |
2174 | ||
2175 | kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); | |
2176 | out: | |
2177 | up_write(&kvm->slots_lock); | |
2178 | return r; | |
2179 | } | |
2180 | ||
2181 | static int alloc_identity_pagetable(struct kvm *kvm) | |
2182 | { | |
2183 | struct kvm_userspace_memory_region kvm_userspace_mem; | |
2184 | int r = 0; | |
2185 | ||
2186 | down_write(&kvm->slots_lock); | |
2187 | if (kvm->arch.ept_identity_pagetable) | |
2188 | goto out; | |
2189 | kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; | |
2190 | kvm_userspace_mem.flags = 0; | |
2191 | kvm_userspace_mem.guest_phys_addr = | |
2192 | kvm->arch.ept_identity_map_addr; | |
2193 | kvm_userspace_mem.memory_size = PAGE_SIZE; | |
2194 | r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); | |
2195 | if (r) | |
2196 | goto out; | |
2197 | ||
2198 | kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, | |
2199 | kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); | |
2200 | out: | |
2201 | up_write(&kvm->slots_lock); | |
2202 | return r; | |
2203 | } | |
2204 | ||
2205 | static void allocate_vpid(struct vcpu_vmx *vmx) | |
2206 | { | |
2207 | int vpid; | |
2208 | ||
2209 | vmx->vpid = 0; | |
2210 | if (!enable_vpid) | |
2211 | return; | |
2212 | spin_lock(&vmx_vpid_lock); | |
2213 | vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); | |
2214 | if (vpid < VMX_NR_VPIDS) { | |
2215 | vmx->vpid = vpid; | |
2216 | __set_bit(vpid, vmx_vpid_bitmap); | |
2217 | } | |
2218 | spin_unlock(&vmx_vpid_lock); | |
2219 | } | |
2220 | ||
2221 | static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr) | |
2222 | { | |
2223 | int f = sizeof(unsigned long); | |
2224 | ||
2225 | if (!cpu_has_vmx_msr_bitmap()) | |
2226 | return; | |
2227 | ||
2228 | /* | |
2229 | * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals | |
2230 | * have the write-low and read-high bitmap offsets the wrong way round. | |
2231 | * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. | |
2232 | */ | |
2233 | if (msr <= 0x1fff) { | |
2234 | __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */ | |
2235 | __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */ | |
2236 | } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { | |
2237 | msr &= 0x1fff; | |
2238 | __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */ | |
2239 | __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */ | |
2240 | } | |
2241 | } | |
2242 | ||
2243 | static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) | |
2244 | { | |
2245 | if (!longmode_only) | |
2246 | __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr); | |
2247 | __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr); | |
2248 | } | |
2249 | ||
2250 | /* | |
2251 | * Sets up the vmcs for emulated real mode. | |
2252 | */ | |
2253 | static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | |
2254 | { | |
2255 | u32 host_sysenter_cs, msr_low, msr_high; | |
2256 | u32 junk; | |
2257 | u64 host_pat, tsc_this, tsc_base; | |
2258 | unsigned long a; | |
2259 | struct descriptor_table dt; | |
2260 | int i; | |
2261 | unsigned long kvm_vmx_return; | |
2262 | u32 exec_control; | |
2263 | ||
2264 | /* I/O */ | |
2265 | vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a)); | |
2266 | vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b)); | |
2267 | ||
2268 | if (cpu_has_vmx_msr_bitmap()) | |
2269 | vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy)); | |
2270 | ||
2271 | vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ | |
2272 | ||
2273 | /* Control */ | |
2274 | vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, | |
2275 | vmcs_config.pin_based_exec_ctrl); | |
2276 | ||
2277 | exec_control = vmcs_config.cpu_based_exec_ctrl; | |
2278 | if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { | |
2279 | exec_control &= ~CPU_BASED_TPR_SHADOW; | |
2280 | #ifdef CONFIG_X86_64 | |
2281 | exec_control |= CPU_BASED_CR8_STORE_EXITING | | |
2282 | CPU_BASED_CR8_LOAD_EXITING; | |
2283 | #endif | |
2284 | } | |
2285 | if (!enable_ept) | |
2286 | exec_control |= CPU_BASED_CR3_STORE_EXITING | | |
2287 | CPU_BASED_CR3_LOAD_EXITING | | |
2288 | CPU_BASED_INVLPG_EXITING; | |
2289 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); | |
2290 | ||
2291 | if (cpu_has_secondary_exec_ctrls()) { | |
2292 | exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; | |
2293 | if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) | |
2294 | exec_control &= | |
2295 | ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; | |
2296 | if (vmx->vpid == 0) | |
2297 | exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; | |
2298 | if (!enable_ept) | |
2299 | exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; | |
2300 | if (!enable_unrestricted_guest) | |
2301 | exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; | |
2302 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); | |
2303 | } | |
2304 | ||
2305 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf); | |
2306 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf); | |
2307 | vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ | |
2308 | ||
2309 | vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */ | |
2310 | vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ | |
2311 | vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ | |
2312 | ||
2313 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ | |
2314 | vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | |
2315 | vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | |
2316 | vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */ | |
2317 | vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */ | |
2318 | vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | |
2319 | #ifdef CONFIG_X86_64 | |
2320 | rdmsrl(MSR_FS_BASE, a); | |
2321 | vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ | |
2322 | rdmsrl(MSR_GS_BASE, a); | |
2323 | vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */ | |
2324 | #else | |
2325 | vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ | |
2326 | vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ | |
2327 | #endif | |
2328 | ||
2329 | vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ | |
2330 | ||
2331 | kvm_get_idt(&dt); | |
2332 | vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */ | |
2333 | ||
2334 | asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); | |
2335 | vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */ | |
2336 | vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); | |
2337 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); | |
2338 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); | |
2339 | ||
2340 | rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk); | |
2341 | vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs); | |
2342 | rdmsrl(MSR_IA32_SYSENTER_ESP, a); | |
2343 | vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */ | |
2344 | rdmsrl(MSR_IA32_SYSENTER_EIP, a); | |
2345 | vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */ | |
2346 | ||
2347 | if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { | |
2348 | rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); | |
2349 | host_pat = msr_low | ((u64) msr_high << 32); | |
2350 | vmcs_write64(HOST_IA32_PAT, host_pat); | |
2351 | } | |
2352 | if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { | |
2353 | rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); | |
2354 | host_pat = msr_low | ((u64) msr_high << 32); | |
2355 | /* Write the default value, following the host PAT */ | |
2356 | vmcs_write64(GUEST_IA32_PAT, host_pat); | |
2357 | /* Keep arch.pat in sync with GUEST_IA32_PAT */ | |
2358 | vmx->vcpu.arch.pat = host_pat; | |
2359 | } | |
2360 | ||
2361 | for (i = 0; i < NR_VMX_MSR; ++i) { | |
2362 | u32 index = vmx_msr_index[i]; | |
2363 | u32 data_low, data_high; | |
2364 | u64 data; | |
2365 | int j = vmx->nmsrs; | |
2366 | ||
2367 | if (rdmsr_safe(index, &data_low, &data_high) < 0) | |
2368 | continue; | |
2369 | if (wrmsr_safe(index, data_low, data_high) < 0) | |
2370 | continue; | |
2371 | data = data_low | ((u64)data_high << 32); | |
2372 | vmx->host_msrs[j].index = index; | |
2373 | vmx->host_msrs[j].reserved = 0; | |
2374 | vmx->host_msrs[j].data = data; | |
2375 | vmx->guest_msrs[j] = vmx->host_msrs[j]; | |
2376 | ++vmx->nmsrs; | |
2377 | } | |
2378 | ||
2379 | vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); | |
2380 | ||
2381 | /* 22.2.1, 20.8.1 */ | |
2382 | vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl); | |
2383 | ||
2384 | vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); | |
2385 | vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); | |
2386 | ||
2387 | tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc; | |
2388 | rdtscll(tsc_this); | |
2389 | if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc) | |
2390 | tsc_base = tsc_this; | |
2391 | ||
2392 | guest_write_tsc(0, tsc_base); | |
2393 | ||
2394 | return 0; | |
2395 | } | |
2396 | ||
2397 | static int init_rmode(struct kvm *kvm) | |
2398 | { | |
2399 | if (!init_rmode_tss(kvm)) | |
2400 | return 0; | |
2401 | if (!init_rmode_identity_map(kvm)) | |
2402 | return 0; | |
2403 | return 1; | |
2404 | } | |
2405 | ||
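| /* | |
| * Put the vcpu into its architectural power-on state: real mode with | |
| * CS:IP = f000:fff0 on the BSP (APs start at their SIPI vector), | |
| * CR0 = 0x60000010, and reset values for the segment, descriptor-table | |
| * and debug registers. | |
| */ | |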
2406 | static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |
2407 | { | |
2408 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
2409 | u64 msr; | |
2410 | int ret; | |
2411 | ||
2412 | vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); | |
2413 | down_read(&vcpu->kvm->slots_lock); | |
2414 | if (!init_rmode(vmx->vcpu.kvm)) { | |
2415 | ret = -ENOMEM; | |
2416 | goto out; | |
2417 | } | |
2418 | ||
2419 | vmx->rmode.vm86_active = 0; | |
2420 | ||
2421 | vmx->soft_vnmi_blocked = 0; | |
2422 | ||
2423 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); | |
2424 | kvm_set_cr8(&vmx->vcpu, 0); | |
2425 | msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; | |
2426 | if (kvm_vcpu_is_bsp(&vmx->vcpu)) | |
2427 | msr |= MSR_IA32_APICBASE_BSP; | |
2428 | kvm_set_apic_base(&vmx->vcpu, msr); | |
2429 | ||
2430 | fx_init(&vmx->vcpu); | |
2431 | ||
2432 | seg_setup(VCPU_SREG_CS); | |
2433 | /* | |
2434 | * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode | |
2435 | * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh. | |
2436 | */ | |
2437 | if (kvm_vcpu_is_bsp(&vmx->vcpu)) { | |
2438 | vmcs_write16(GUEST_CS_SELECTOR, 0xf000); | |
2439 | vmcs_writel(GUEST_CS_BASE, 0x000f0000); | |
2440 | } else { | |
2441 | vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8); | |
2442 | vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12); | |
2443 | } | |
2444 | ||
2445 | seg_setup(VCPU_SREG_DS); | |
2446 | seg_setup(VCPU_SREG_ES); | |
2447 | seg_setup(VCPU_SREG_FS); | |
2448 | seg_setup(VCPU_SREG_GS); | |
2449 | seg_setup(VCPU_SREG_SS); | |
2450 | ||
2451 | vmcs_write16(GUEST_TR_SELECTOR, 0); | |
2452 | vmcs_writel(GUEST_TR_BASE, 0); | |
2453 | vmcs_write32(GUEST_TR_LIMIT, 0xffff); | |
2454 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); | |
2455 | ||
2456 | vmcs_write16(GUEST_LDTR_SELECTOR, 0); | |
2457 | vmcs_writel(GUEST_LDTR_BASE, 0); | |
2458 | vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); | |
2459 | vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); | |
2460 | ||
2461 | vmcs_write32(GUEST_SYSENTER_CS, 0); | |
2462 | vmcs_writel(GUEST_SYSENTER_ESP, 0); | |
2463 | vmcs_writel(GUEST_SYSENTER_EIP, 0); | |
2464 | ||
2465 | vmcs_writel(GUEST_RFLAGS, 0x02); | |
2466 | if (kvm_vcpu_is_bsp(&vmx->vcpu)) | |
2467 | kvm_rip_write(vcpu, 0xfff0); | |
2468 | else | |
2469 | kvm_rip_write(vcpu, 0); | |
2470 | kvm_register_write(vcpu, VCPU_REGS_RSP, 0); | |
2471 | ||
2472 | vmcs_writel(GUEST_DR7, 0x400); | |
2473 | ||
2474 | vmcs_writel(GUEST_GDTR_BASE, 0); | |
2475 | vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); | |
2476 | ||
2477 | vmcs_writel(GUEST_IDTR_BASE, 0); | |
2478 | vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); | |
2479 | ||
2480 | vmcs_write32(GUEST_ACTIVITY_STATE, 0); | |
2481 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); | |
2482 | vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0); | |
2483 | ||
2484 | /* Special registers */ | |
2485 | vmcs_write64(GUEST_IA32_DEBUGCTL, 0); | |
2486 | ||
2487 | setup_msrs(vmx); | |
2488 | ||
2489 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ | |
2490 | ||
2491 | if (cpu_has_vmx_tpr_shadow()) { | |
2492 | vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); | |
2493 | if (vm_need_tpr_shadow(vmx->vcpu.kvm)) | |
2494 | vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, | |
2495 | page_to_phys(vmx->vcpu.arch.apic->regs_page)); | |
2496 | vmcs_write32(TPR_THRESHOLD, 0); | |
2497 | } | |
2498 | ||
2499 | if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) | |
2500 | vmcs_write64(APIC_ACCESS_ADDR, | |
2501 | page_to_phys(vmx->vcpu.kvm->arch.apic_access_page)); | |
2502 | ||
2503 | if (vmx->vpid != 0) | |
2504 | vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); | |
2505 | ||
2506 | vmx->vcpu.arch.cr0 = 0x60000010; | |
2507 | vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */ | |
2508 | vmx_set_cr4(&vmx->vcpu, 0); | |
2509 | vmx_set_efer(&vmx->vcpu, 0); | |
2510 | vmx_fpu_activate(&vmx->vcpu); | |
2511 | update_exception_bitmap(&vmx->vcpu); | |
2512 | ||
2513 | vpid_sync_vcpu_all(vmx); | |
2514 | ||
2515 | ret = 0; | |
2516 | ||
2517 | /* HACK: Don't enable emulation on guest boot/reset */ | |
2518 | vmx->emulation_required = 0; | |
2519 | ||
2520 | out: | |
2521 | up_read(&vcpu->kvm->slots_lock); | |
2522 | return ret; | |
2523 | } | |
2524 | ||
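| /* | |
| * Request an interrupt-window exit so a pending interrupt can be | |
| * injected as soon as the guest is able to accept it. | |
| */ | |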
2525 | static void enable_irq_window(struct kvm_vcpu *vcpu) | |
2526 | { | |
2527 | u32 cpu_based_vm_exec_control; | |
2528 | ||
2529 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | |
2530 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; | |
2531 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | |
2532 | } | |
2533 | ||
2534 | static void enable_nmi_window(struct kvm_vcpu *vcpu) | |
2535 | { | |
2536 | u32 cpu_based_vm_exec_control; | |
2537 | ||
2538 | if (!cpu_has_virtual_nmis()) { | |
2539 | enable_irq_window(vcpu); | |
2540 | return; | |
2541 | } | |
2542 | ||
2543 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | |
2544 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; | |
2545 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | |
2546 | } | |
2547 | ||
2548 | static void vmx_inject_irq(struct kvm_vcpu *vcpu) | |
2549 | { | |
2550 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
2551 | uint32_t intr; | |
2552 | int irq = vcpu->arch.interrupt.nr; | |
2553 | ||
2554 | trace_kvm_inj_virq(irq); | |
2555 | ||
2556 | ++vcpu->stat.irq_injections; | |
2557 | if (vmx->rmode.vm86_active) { | |
2558 | vmx->rmode.irq.pending = true; | |
2559 | vmx->rmode.irq.vector = irq; | |
2560 | vmx->rmode.irq.rip = kvm_rip_read(vcpu); | |
2561 | if (vcpu->arch.interrupt.soft) | |
2562 | vmx->rmode.irq.rip += | |
2563 | vmx->vcpu.arch.event_exit_inst_len; | |
2564 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | |
2565 | irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK); | |
2566 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); | |
2567 | kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); | |
2568 | return; | |
2569 | } | |
2570 | intr = irq | INTR_INFO_VALID_MASK; | |
2571 | if (vcpu->arch.interrupt.soft) { | |
2572 | intr |= INTR_TYPE_SOFT_INTR; | |
2573 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, | |
2574 | vmx->vcpu.arch.event_exit_inst_len); | |
2575 | } else | |
2576 | intr |= INTR_TYPE_EXT_INTR; | |
2577 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); | |
2578 | } | |
2579 | ||
2580 | static void vmx_inject_nmi(struct kvm_vcpu *vcpu) | |
2581 | { | |
2582 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
2583 | ||
2584 | if (!cpu_has_virtual_nmis()) { | |
2585 | /* | |
2586 | * Tracking the NMI-blocked state in software is built upon | |
2587 | * finding the next open IRQ window. This, in turn, depends on | |
2588 | * well-behaving guests: They have to keep IRQs disabled at | |
2589 | * least as long as the NMI handler runs. Otherwise we may | |
2590 | * cause NMI nesting, maybe breaking the guest. But as this is | |
2591 | * highly unlikely, we can live with the residual risk. | |
2592 | */ | |
2593 | vmx->soft_vnmi_blocked = 1; | |
2594 | vmx->vnmi_blocked_time = 0; | |
2595 | } | |
2596 | ||
2597 | ++vcpu->stat.nmi_injections; | |
2598 | if (vmx->rmode.vm86_active) { | |
2599 | vmx->rmode.irq.pending = true; | |
2600 | vmx->rmode.irq.vector = NMI_VECTOR; | |
2601 | vmx->rmode.irq.rip = kvm_rip_read(vcpu); | |
2602 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | |
2603 | NMI_VECTOR | INTR_TYPE_SOFT_INTR | | |
2604 | INTR_INFO_VALID_MASK); | |
2605 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); | |
2606 | kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); | |
2607 | return; | |
2608 | } | |
2609 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | |
2610 | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); | |
2611 | } | |
2612 | ||
2613 | static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) | |
2614 | { | |
2615 | if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) | |
2616 | return 0; | |
2617 | ||
2618 | return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & | |
2619 | (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS | | |
2620 | GUEST_INTR_STATE_NMI)); | |
2621 | } | |
2622 | ||
2623 | static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) | |
2624 | { | |
2625 | return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && | |
2626 | !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & | |
2627 | (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); | |
2628 | } | |
2629 | ||
2630 | static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) | |
2631 | { | |
2632 | int ret; | |
2633 | struct kvm_userspace_memory_region tss_mem = { | |
2634 | .slot = TSS_PRIVATE_MEMSLOT, | |
2635 | .guest_phys_addr = addr, | |
2636 | .memory_size = PAGE_SIZE * 3, | |
2637 | .flags = 0, | |
2638 | }; | |
2639 | ||
2640 | ret = kvm_set_memory_region(kvm, &tss_mem, 0); | |
2641 | if (ret) | |
2642 | return ret; | |
2643 | kvm->arch.tss_addr = addr; | |
2644 | return 0; | |
2645 | } | |
2646 | ||
2647 | static int handle_rmode_exception(struct kvm_vcpu *vcpu, | |
2648 | int vec, u32 err_code) | |
2649 | { | |
2650 | /* | |
2651 | * An instruction with the address-size override prefix (opcode 0x67) | |
2652 | * causes the #SS fault with error code 0 in VM86 mode. | |
2653 | */ | |
2654 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) | |
2655 | if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE) | |
2656 | return 1; | |
2657 | /* | |
2658 | * Forward all other exceptions that are valid in real mode. | |
2659 | * FIXME: Breaks guest debugging in real mode, needs to be fixed with | |
2660 | * the required debugging infrastructure rework. | |
2661 | */ | |
2662 | switch (vec) { | |
2663 | case DB_VECTOR: | |
2664 | if (vcpu->guest_debug & | |
2665 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) | |
2666 | return 0; | |
2667 | kvm_queue_exception(vcpu, vec); | |
2668 | return 1; | |
2669 | case BP_VECTOR: | |
2670 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) | |
2671 | return 0; | |
2672 | /* fall through */ | |
2673 | case DE_VECTOR: | |
2674 | case OF_VECTOR: | |
2675 | case BR_VECTOR: | |
2676 | case UD_VECTOR: | |
2677 | case DF_VECTOR: | |
2678 | case SS_VECTOR: | |
2679 | case GP_VECTOR: | |
2680 | case MF_VECTOR: | |
2681 | kvm_queue_exception(vcpu, vec); | |
2682 | return 1; | |
2683 | } | |
2684 | return 0; | |
2685 | } | |
2686 | ||
2687 | /* | |
2688 | * Trigger machine check on the host. We assume all the MSRs are already set up | |
2689 | * by the CPU and that we still run on the same CPU as the MCE occurred on. | |
2690 | * We pass a fake environment to the machine check handler because we want | |
2691 | * the guest to always be treated like user space, no matter what context | |
2692 | * it used internally. | |
2693 | */ | |
2694 | static void kvm_machine_check(void) | |
2695 | { | |
2696 | #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) | |
2697 | struct pt_regs regs = { | |
2698 | .cs = 3, /* Fake ring 3 no matter what the guest ran on */ | |
2699 | .flags = X86_EFLAGS_IF, | |
2700 | }; | |
2701 | ||
2702 | do_machine_check(®s, 0); | |
2703 | #endif | |
2704 | } | |
2705 | ||
2706 | static int handle_machine_check(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
2707 | { | |
2708 | /* already handled by vcpu_run */ | |
2709 | return 1; | |
2710 | } | |
2711 | ||
2712 | static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
2713 | { | |
2714 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
2715 | u32 intr_info, ex_no, error_code; | |
2716 | unsigned long cr2, rip, dr6; | |
2717 | u32 vect_info; | |
2718 | enum emulation_result er; | |
2719 | ||
2720 | vect_info = vmx->idt_vectoring_info; | |
2721 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | |
2722 | ||
2723 | if (is_machine_check(intr_info)) | |
2724 | return handle_machine_check(vcpu, kvm_run); | |
2725 | ||
2726 | if ((vect_info & VECTORING_INFO_VALID_MASK) && | |
2727 | !is_page_fault(intr_info)) | |
2728 | printk(KERN_ERR "%s: unexpected, vectoring info 0x%x " | |
2729 | "intr info 0x%x\n", __func__, vect_info, intr_info); | |
2730 | ||
2731 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) | |
2732 | return 1; /* already handled by vmx_vcpu_run() */ | |
2733 | ||
2734 | if (is_no_device(intr_info)) { | |
2735 | vmx_fpu_activate(vcpu); | |
2736 | return 1; | |
2737 | } | |
2738 | ||
2739 | if (is_invalid_opcode(intr_info)) { | |
2740 | er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD); | |
2741 | if (er != EMULATE_DONE) | |
2742 | kvm_queue_exception(vcpu, UD_VECTOR); | |
2743 | return 1; | |
2744 | } | |
2745 | ||
2746 | error_code = 0; | |
2747 | rip = kvm_rip_read(vcpu); | |
2748 | if (intr_info & INTR_INFO_DELIVER_CODE_MASK) | |
2749 | error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); | |
2750 | if (is_page_fault(intr_info)) { | |
2751 | /* EPT won't cause page fault directly */ | |
2752 | if (enable_ept) | |
2753 | BUG(); | |
2754 | cr2 = vmcs_readl(EXIT_QUALIFICATION); | |
2755 | trace_kvm_page_fault(cr2, error_code); | |
2756 | ||
2757 | if (kvm_event_needs_reinjection(vcpu)) | |
2758 | kvm_mmu_unprotect_page_virt(vcpu, cr2); | |
2759 | return kvm_mmu_page_fault(vcpu, cr2, error_code); | |
2760 | } | |
2761 | ||
2762 | if (vmx->rmode.vm86_active && | |
2763 | handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK, | |
2764 | error_code)) { | |
2765 | if (vcpu->arch.halt_request) { | |
2766 | vcpu->arch.halt_request = 0; | |
2767 | return kvm_emulate_halt(vcpu); | |
2768 | } | |
2769 | return 1; | |
2770 | } | |
2771 | ||
2772 | ex_no = intr_info & INTR_INFO_VECTOR_MASK; | |
2773 | switch (ex_no) { | |
2774 | case DB_VECTOR: | |
2775 | dr6 = vmcs_readl(EXIT_QUALIFICATION); | |
2776 | if (!(vcpu->guest_debug & | |
2777 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { | |
2778 | vcpu->arch.dr6 = dr6 | DR6_FIXED_1; | |
2779 | kvm_queue_exception(vcpu, DB_VECTOR); | |
2780 | return 1; | |
2781 | } | |
2782 | kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; | |
2783 | kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); | |
2784 | /* fall through */ | |
2785 | case BP_VECTOR: | |
2786 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | |
2787 | kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; | |
2788 | kvm_run->debug.arch.exception = ex_no; | |
2789 | break; | |
2790 | default: | |
2791 | kvm_run->exit_reason = KVM_EXIT_EXCEPTION; | |
2792 | kvm_run->ex.exception = ex_no; | |
2793 | kvm_run->ex.error_code = error_code; | |
2794 | break; | |
2795 | } | |
2796 | return 0; | |
2797 | } | |
2798 | ||
2799 | static int handle_external_interrupt(struct kvm_vcpu *vcpu, | |
2800 | struct kvm_run *kvm_run) | |
2801 | { | |
2802 | ++vcpu->stat.irq_exits; | |
2803 | return 1; | |
2804 | } | |
2805 | ||
2806 | static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
2807 | { | |
2808 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; | |
2809 | return 0; | |
2810 | } | |
2811 | ||
2812 | static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
2813 | { | |
2814 | unsigned long exit_qualification; | |
2815 | int size, in, string; | |
2816 | unsigned port; | |
2817 | ||
2818 | ++vcpu->stat.io_exits; | |
2819 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | |
2820 | string = (exit_qualification & 16) != 0; | |
2821 | ||
2822 | if (string) { | |
2823 | if (emulate_instruction(vcpu, | |
2824 | kvm_run, 0, 0, 0) == EMULATE_DO_MMIO) | |
2825 | return 0; | |
2826 | return 1; | |
2827 | } | |
2828 | ||
2829 | size = (exit_qualification & 7) + 1; | |
2830 | in = (exit_qualification & 8) != 0; | |
2831 | port = exit_qualification >> 16; | |
2832 | ||
2833 | skip_emulated_instruction(vcpu); | |
2834 | return kvm_emulate_pio(vcpu, kvm_run, in, size, port); | |
2835 | } | |
2836 | ||
2837 | static void | |
2838 | vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) | |
2839 | { | |
2840 | /* | |
2841 | * Patch in the VMCALL instruction: | |
2842 | */ | |
2843 | hypercall[0] = 0x0f; | |
2844 | hypercall[1] = 0x01; | |
2845 | hypercall[2] = 0xc1; | |
2846 | } | |
2847 | ||
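| /* | |
| * Handle a control-register access exit; the exit qualification | |
| * encodes the CR number, the access type (mov to/from CR, clts, lmsw) | |
| * and the general-purpose register involved. | |
| */ | |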
2848 | static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
2849 | { | |
2850 | unsigned long exit_qualification, val; | |
2851 | int cr; | |
2852 | int reg; | |
2853 | ||
2854 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | |
2855 | cr = exit_qualification & 15; | |
2856 | reg = (exit_qualification >> 8) & 15; | |
2857 | switch ((exit_qualification >> 4) & 3) { | |
2858 | case 0: /* mov to cr */ | |
2859 | val = kvm_register_read(vcpu, reg); | |
2860 | trace_kvm_cr_write(cr, val); | |
2861 | switch (cr) { | |
2862 | case 0: | |
2863 | kvm_set_cr0(vcpu, val); | |
2864 | skip_emulated_instruction(vcpu); | |
2865 | return 1; | |
2866 | case 3: | |
2867 | kvm_set_cr3(vcpu, val); | |
2868 | skip_emulated_instruction(vcpu); | |
2869 | return 1; | |
2870 | case 4: | |
2871 | kvm_set_cr4(vcpu, val); | |
2872 | skip_emulated_instruction(vcpu); | |
2873 | return 1; | |
2874 | case 8: { | |
2875 | u8 cr8_prev = kvm_get_cr8(vcpu); | |
2876 | u8 cr8 = kvm_register_read(vcpu, reg); | |
2877 | kvm_set_cr8(vcpu, cr8); | |
2878 | skip_emulated_instruction(vcpu); | |
2879 | if (irqchip_in_kernel(vcpu->kvm)) | |
2880 | return 1; | |
2881 | if (cr8_prev <= cr8) | |
2882 | return 1; | |
2883 | kvm_run->exit_reason = KVM_EXIT_SET_TPR; | |
2884 | return 0; | |
2885 | } | |
2886 | } | |
2887 | break; | |
2888 | case 2: /* clts */ | |
2889 | vmx_fpu_deactivate(vcpu); | |
2890 | vcpu->arch.cr0 &= ~X86_CR0_TS; | |
2891 | vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); | |
2892 | vmx_fpu_activate(vcpu); | |
2893 | skip_emulated_instruction(vcpu); | |
2894 | return 1; | |
2895 | case 1: /* mov from cr */ | |
2896 | switch (cr) { | |
2897 | case 3: | |
2898 | kvm_register_write(vcpu, reg, vcpu->arch.cr3); | |
2899 | trace_kvm_cr_read(cr, vcpu->arch.cr3); | |
2900 | skip_emulated_instruction(vcpu); | |
2901 | return 1; | |
2902 | case 8: | |
2903 | val = kvm_get_cr8(vcpu); | |
2904 | kvm_register_write(vcpu, reg, val); | |
2905 | trace_kvm_cr_read(cr, val); | |
2906 | skip_emulated_instruction(vcpu); | |
2907 | return 1; | |
2908 | } | |
2909 | break; | |
2910 | case 3: /* lmsw */ | |
2911 | kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f); | |
2912 | ||
2913 | skip_emulated_instruction(vcpu); | |
2914 | return 1; | |
2915 | default: | |
2916 | break; | |
2917 | } | |
2918 | kvm_run->exit_reason = 0; | |
2919 | pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n", | |
2920 | (int)(exit_qualification >> 4) & 3, cr); | |
2921 | return 0; | |
2922 | } | |
2923 | ||
2924 | static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
2925 | { | |
2926 | unsigned long exit_qualification; | |
2927 | unsigned long val; | |
2928 | int dr, reg; | |
2929 | ||
2930 | dr = vmcs_readl(GUEST_DR7); | |
2931 | if (dr & DR7_GD) { | |
2932 | /* | |
2933 | * As the vm-exit takes precedence over the debug trap, we | |
2934 | * need to emulate the latter, either for the host or the | |
2935 | * guest debugging itself. | |
2936 | */ | |
2937 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { | |
2938 | kvm_run->debug.arch.dr6 = vcpu->arch.dr6; | |
2939 | kvm_run->debug.arch.dr7 = dr; | |
2940 | kvm_run->debug.arch.pc = | |
2941 | vmcs_readl(GUEST_CS_BASE) + | |
2942 | vmcs_readl(GUEST_RIP); | |
2943 | kvm_run->debug.arch.exception = DB_VECTOR; | |
2944 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | |
2945 | return 0; | |
2946 | } else { | |
2947 | vcpu->arch.dr7 &= ~DR7_GD; | |
2948 | vcpu->arch.dr6 |= DR6_BD; | |
2949 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); | |
2950 | kvm_queue_exception(vcpu, DB_VECTOR); | |
2951 | return 1; | |
2952 | } | |
2953 | } | |
2954 | ||
2955 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | |
2956 | dr = exit_qualification & DEBUG_REG_ACCESS_NUM; | |
2957 | reg = DEBUG_REG_ACCESS_REG(exit_qualification); | |
2958 | if (exit_qualification & TYPE_MOV_FROM_DR) { | |
2959 | switch (dr) { | |
2960 | case 0 ... 3: | |
2961 | val = vcpu->arch.db[dr]; | |
2962 | break; | |
2963 | case 6: | |
2964 | val = vcpu->arch.dr6; | |
2965 | break; | |
2966 | case 7: | |
2967 | val = vcpu->arch.dr7; | |
2968 | break; | |
2969 | default: | |
2970 | val = 0; | |
2971 | } | |
2972 | kvm_register_write(vcpu, reg, val); | |
2973 | } else { | |
2974 | val = vcpu->arch.regs[reg]; | |
2975 | switch (dr) { | |
2976 | case 0 ... 3: | |
2977 | vcpu->arch.db[dr] = val; | |
2978 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) | |
2979 | vcpu->arch.eff_db[dr] = val; | |
2980 | break; | |
2981 | case 4 ... 5: | |
2982 | if (vcpu->arch.cr4 & X86_CR4_DE) | |
2983 | kvm_queue_exception(vcpu, UD_VECTOR); | |
2984 | break; | |
2985 | case 6: | |
2986 | if (val & 0xffffffff00000000ULL) { | |
2987 | kvm_queue_exception(vcpu, GP_VECTOR); | |
2988 | break; | |
2989 | } | |
2990 | vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; | |
2991 | break; | |
2992 | case 7: | |
2993 | if (val & 0xffffffff00000000ULL) { | |
2994 | kvm_queue_exception(vcpu, GP_VECTOR); | |
2995 | break; | |
2996 | } | |
2997 | vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; | |
2998 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { | |
2999 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); | |
3000 | vcpu->arch.switch_db_regs = | |
3001 | (val & DR7_BP_EN_MASK); | |
3002 | } | |
3003 | break; | |
3004 | } | |
3005 | } | |
3006 | skip_emulated_instruction(vcpu); | |
3007 | return 1; | |
3008 | } | |
3009 | ||
3010 | static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3011 | { | |
3012 | kvm_emulate_cpuid(vcpu); | |
3013 | return 1; | |
3014 | } | |
3015 | ||
3016 | static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3017 | { | |
3018 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; | |
3019 | u64 data; | |
3020 | ||
3021 | if (vmx_get_msr(vcpu, ecx, &data)) { | |
3022 | kvm_inject_gp(vcpu, 0); | |
3023 | return 1; | |
3024 | } | |
3025 | ||
3026 | trace_kvm_msr_read(ecx, data); | |
3027 | ||
3028 | /* FIXME: handling of bits 32:63 of rax, rdx */ | |
3029 | vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u; | |
3030 | vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u; | |
3031 | skip_emulated_instruction(vcpu); | |
3032 | return 1; | |
3033 | } | |
3034 | ||
3035 | static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3036 | { | |
3037 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; | |
3038 | u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | |
3039 | | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); | |
3040 | ||
3041 | trace_kvm_msr_write(ecx, data); | |
3042 | ||
3043 | if (vmx_set_msr(vcpu, ecx, data) != 0) { | |
3044 | kvm_inject_gp(vcpu, 0); | |
3045 | return 1; | |
3046 | } | |
3047 | ||
3048 | skip_emulated_instruction(vcpu); | |
3049 | return 1; | |
3050 | } | |
3051 | ||
3052 | static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu, | |
3053 | struct kvm_run *kvm_run) | |
3054 | { | |
3055 | return 1; | |
3056 | } | |
3057 | ||
3058 | static int handle_interrupt_window(struct kvm_vcpu *vcpu, | |
3059 | struct kvm_run *kvm_run) | |
3060 | { | |
3061 | u32 cpu_based_vm_exec_control; | |
3062 | ||
3063 | /* clear pending irq */ | |
3064 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | |
3065 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; | |
3066 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | |
3067 | ||
3068 | ++vcpu->stat.irq_window_exits; | |
3069 | ||
3070 | /* | |
3071 | * If userspace is waiting to inject interrupts, exit as soon as | |
3072 | * possible. | |
3073 | */ | |
3074 | if (!irqchip_in_kernel(vcpu->kvm) && | |
3075 | kvm_run->request_interrupt_window && | |
3076 | !kvm_cpu_has_interrupt(vcpu)) { | |
3077 | kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | |
3078 | return 0; | |
3079 | } | |
3080 | return 1; | |
3081 | } | |
3082 | ||
3083 | static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3084 | { | |
3085 | skip_emulated_instruction(vcpu); | |
3086 | return kvm_emulate_halt(vcpu); | |
3087 | } | |
3088 | ||
3089 | static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3090 | { | |
3091 | skip_emulated_instruction(vcpu); | |
3092 | kvm_emulate_hypercall(vcpu); | |
3093 | return 1; | |
3094 | } | |
3095 | ||
3096 | static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3097 | { | |
3098 | kvm_queue_exception(vcpu, UD_VECTOR); | |
3099 | return 1; | |
3100 | } | |
3101 | ||
3102 | static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3103 | { | |
3104 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | |
3105 | ||
3106 | kvm_mmu_invlpg(vcpu, exit_qualification); | |
3107 | skip_emulated_instruction(vcpu); | |
3108 | return 1; | |
3109 | } | |
3110 | ||
3111 | static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3112 | { | |
3113 | skip_emulated_instruction(vcpu); | |
3114 | /* TODO: Add support for VT-d/pass-through device */ | |
3115 | return 1; | |
3116 | } | |
3117 | ||
3118 | static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3119 | { | |
3120 | unsigned long exit_qualification; | |
3121 | enum emulation_result er; | |
3122 | unsigned long offset; | |
3123 | ||
3124 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | |
3125 | offset = exit_qualification & 0xffful; | |
3126 | ||
3127 | er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | |
3128 | ||
3129 | if (er != EMULATE_DONE) { | |
3130 | printk(KERN_ERR | |
3131 | "Fail to handle apic access vmexit! Offset is 0x%lx\n", | |
3132 | offset); | |
3133 | return -ENOEXEC; | |
3134 | } | |
3135 | return 1; | |
3136 | } | |
3137 | ||
3138 | static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3139 | { | |
3140 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
3141 | unsigned long exit_qualification; | |
3142 | u16 tss_selector; | |
3143 | int reason, type, idt_v; | |
3144 | ||
3145 | idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); | |
3146 | type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); | |
3147 | ||
3148 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | |
3149 | ||
3150 | reason = (u32)exit_qualification >> 30; | |
3151 | if (reason == TASK_SWITCH_GATE && idt_v) { | |
3152 | switch (type) { | |
3153 | case INTR_TYPE_NMI_INTR: | |
3154 | vcpu->arch.nmi_injected = false; | |
3155 | if (cpu_has_virtual_nmis()) | |
3156 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, | |
3157 | GUEST_INTR_STATE_NMI); | |
3158 | break; | |
3159 | case INTR_TYPE_EXT_INTR: | |
3160 | case INTR_TYPE_SOFT_INTR: | |
3161 | kvm_clear_interrupt_queue(vcpu); | |
3162 | break; | |
3163 | case INTR_TYPE_HARD_EXCEPTION: | |
3164 | case INTR_TYPE_SOFT_EXCEPTION: | |
3165 | kvm_clear_exception_queue(vcpu); | |
3166 | break; | |
3167 | default: | |
3168 | break; | |
3169 | } | |
3170 | } | |
3171 | tss_selector = exit_qualification; | |
3172 | ||
3173 | if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && | |
3174 | type != INTR_TYPE_EXT_INTR && | |
3175 | type != INTR_TYPE_NMI_INTR)) | |
3176 | skip_emulated_instruction(vcpu); | |
3177 | ||
3178 | if (!kvm_task_switch(vcpu, tss_selector, reason)) | |
3179 | return 0; | |
3180 | ||
3181 | /* clear all local breakpoint enable flags */ | |
3182 | vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55); | |
3183 | ||
3184 | /* | |
3185 | * TODO: What about debug traps on tss switch? | |
3186 | * Are we supposed to inject them and update dr6? | |
3187 | */ | |
3188 | ||
3189 | return 1; | |
3190 | } | |
3191 | ||
3192 | static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3193 | { | |
3194 | unsigned long exit_qualification; | |
3195 | gpa_t gpa; | |
3196 | int gla_validity; | |
3197 | ||
3198 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | |
3199 | ||
3200 | if (exit_qualification & (1 << 6)) { | |
3201 | printk(KERN_ERR "EPT: GPA exceeds GAW!\n"); | |
3202 | return -EINVAL; | |
3203 | } | |
3204 | ||
3205 | gla_validity = (exit_qualification >> 7) & 0x3; | |
3206 | if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) { | |
3207 | printk(KERN_ERR "EPT: Handling EPT violation failed!\n"); | |
3208 | printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n", | |
3209 | (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS), | |
3210 | vmcs_readl(GUEST_LINEAR_ADDRESS)); | |
3211 | printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", | |
3212 | (long unsigned int)exit_qualification); | |
3213 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | |
3214 | kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION; | |
3215 | return 0; | |
3216 | } | |
3217 | ||
3218 | gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); | |
3219 | trace_kvm_page_fault(gpa, exit_qualification); | |
3220 | return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0); | |
3221 | } | |
3222 | ||
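| /* | |
| * Mask of bits that must be zero in an EPT entry at the given level: | |
| * bits above the physical address width plus the level-specific | |
| * reserved bits. | |
| */ | |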
3223 | static u64 ept_rsvd_mask(u64 spte, int level) | |
3224 | { | |
3225 | int i; | |
3226 | u64 mask = 0; | |
3227 | ||
3228 | for (i = 51; i > boot_cpu_data.x86_phys_bits; i--) | |
3229 | mask |= (1ULL << i); | |
3230 | ||
3231 | if (level > 2) | |
3232 | /* bits 7:3 reserved */ | |
3233 | mask |= 0xf8; | |
3234 | else if (level == 2) { | |
3235 | if (spte & (1ULL << 7)) | |
3236 | /* 2MB page, bits 20:12 reserved */ | |
3237 | mask |= 0x1ff000; | |
3238 | else | |
3239 | /* bits 6:3 reserved */ | |
3240 | mask |= 0x78; | |
3241 | } | |
3242 | ||
3243 | return mask; | |
3244 | } | |
3245 | ||
3246 | static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte, | |
3247 | int level) | |
3248 | { | |
3249 | printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level); | |
3250 | ||
3251 | /* 010b (write-only) */ | |
3252 | WARN_ON((spte & 0x7) == 0x2); | |
3253 | ||
3254 | /* 110b (write/execute) */ | |
3255 | WARN_ON((spte & 0x7) == 0x6); | |
3256 | ||
3257 | /* 100b (execute-only) and value not supported by logical processor */ | |
3258 | if (!cpu_has_vmx_ept_execute_only()) | |
3259 | WARN_ON((spte & 0x7) == 0x4); | |
3260 | ||
3261 | /* not 000b */ | |
3262 | if ((spte & 0x7)) { | |
3263 | u64 rsvd_bits = spte & ept_rsvd_mask(spte, level); | |
3264 | ||
3265 | if (rsvd_bits != 0) { | |
3266 | printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n", | |
3267 | __func__, rsvd_bits); | |
3268 | WARN_ON(1); | |
3269 | } | |
3270 | ||
3271 | if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) { | |
3272 | u64 ept_mem_type = (spte & 0x38) >> 3; | |
3273 | ||
3274 | if (ept_mem_type == 2 || ept_mem_type == 3 || | |
3275 | ept_mem_type == 7) { | |
3276 | printk(KERN_ERR "%s: ept_mem_type=0x%llx\n", | |
3277 | __func__, ept_mem_type); | |
3278 | WARN_ON(1); | |
3279 | } | |
3280 | } | |
3281 | } | |
3282 | } | |
3283 | ||
3284 | static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3285 | { | |
3286 | u64 sptes[4]; | |
3287 | int nr_sptes, i; | |
3288 | gpa_t gpa; | |
3289 | ||
3290 | gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); | |
3291 | ||
3292 | printk(KERN_ERR "EPT: Misconfiguration.\n"); | |
3293 | printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa); | |
3294 | ||
3295 | nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes); | |
3296 | ||
3297 | for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i) | |
3298 | ept_misconfig_inspect_spte(vcpu, sptes[i-1], i); | |
3299 | ||
3300 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | |
3301 | kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; | |
3302 | ||
3303 | return 0; | |
3304 | } | |
3305 | ||
3306 | static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3307 | { | |
3308 | u32 cpu_based_vm_exec_control; | |
3309 | ||
3310 | /* stop requesting NMI-window exits */ | |
3311 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | |
3312 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; | |
3313 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | |
3314 | ++vcpu->stat.nmi_window_exits; | |
3315 | ||
3316 | return 1; | |
3317 | } | |
3318 | ||
3319 | static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, | |
3320 | struct kvm_run *kvm_run) | |
3321 | { | |
3322 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
3323 | enum emulation_result err = EMULATE_DONE; | |
3324 | ||
3325 | local_irq_enable(); | |
3326 | preempt_enable(); | |
3327 | ||
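| /* Emulate one instruction at a time until the guest state becomes valid | |
| * again (e.g. the guest has left big real mode), emulation fails, or an | |
| * MMIO access has to be completed by userspace. */ | |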
3328 | while (!guest_state_valid(vcpu)) { | |
3329 | err = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | |
3330 | ||
3331 | if (err == EMULATE_DO_MMIO) | |
3332 | break; | |
3333 | ||
3334 | if (err != EMULATE_DONE) { | |
3335 | kvm_report_emulation_failure(vcpu, "emulation failure"); | |
3336 | break; | |
3337 | } | |
3338 | ||
3339 | if (signal_pending(current)) | |
3340 | break; | |
3341 | if (need_resched()) | |
3342 | schedule(); | |
3343 | } | |
3344 | ||
3345 | preempt_disable(); | |
3346 | local_irq_disable(); | |
3347 | ||
3348 | vmx->invalid_state_emulation_result = err; | |
3349 | } | |
3350 | ||
3351 | /* | |
3352 | * The exit handlers return 1 if the exit was handled fully and guest execution | |
3353 | * may resume. Otherwise they set the kvm_run parameter to indicate to | |
3354 | * userspace what needs to be done and return 0. | |
3355 | */ | |
3356 | static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu, | |
3357 | struct kvm_run *kvm_run) = { | |
3358 | [EXIT_REASON_EXCEPTION_NMI] = handle_exception, | |
3359 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | |
3360 | [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, | |
3361 | [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, | |
3362 | [EXIT_REASON_IO_INSTRUCTION] = handle_io, | |
3363 | [EXIT_REASON_CR_ACCESS] = handle_cr, | |
3364 | [EXIT_REASON_DR_ACCESS] = handle_dr, | |
3365 | [EXIT_REASON_CPUID] = handle_cpuid, | |
3366 | [EXIT_REASON_MSR_READ] = handle_rdmsr, | |
3367 | [EXIT_REASON_MSR_WRITE] = handle_wrmsr, | |
3368 | [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, | |
3369 | [EXIT_REASON_HLT] = handle_halt, | |
3370 | [EXIT_REASON_INVLPG] = handle_invlpg, | |
3371 | [EXIT_REASON_VMCALL] = handle_vmcall, | |
3372 | [EXIT_REASON_VMCLEAR] = handle_vmx_insn, | |
3373 | [EXIT_REASON_VMLAUNCH] = handle_vmx_insn, | |
3374 | [EXIT_REASON_VMPTRLD] = handle_vmx_insn, | |
3375 | [EXIT_REASON_VMPTRST] = handle_vmx_insn, | |
3376 | [EXIT_REASON_VMREAD] = handle_vmx_insn, | |
3377 | [EXIT_REASON_VMRESUME] = handle_vmx_insn, | |
3378 | [EXIT_REASON_VMWRITE] = handle_vmx_insn, | |
3379 | [EXIT_REASON_VMOFF] = handle_vmx_insn, | |
3380 | [EXIT_REASON_VMON] = handle_vmx_insn, | |
3381 | [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, | |
3382 | [EXIT_REASON_APIC_ACCESS] = handle_apic_access, | |
3383 | [EXIT_REASON_WBINVD] = handle_wbinvd, | |
3384 | [EXIT_REASON_TASK_SWITCH] = handle_task_switch, | |
3385 | [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, | |
3386 | [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, | |
3387 | [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, | |
3388 | }; | |
3389 | ||
3390 | static const int kvm_vmx_max_exit_handlers = | |
3391 | ARRAY_SIZE(kvm_vmx_exit_handlers); | |
3392 | ||
3393 | /* | |
3394 | * The guest has exited. See if we can fix it or if we need userspace | |
3395 | * assistance. | |
3396 | */ | |
3397 | static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |
3398 | { | |
3399 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
3400 | u32 exit_reason = vmx->exit_reason; | |
3401 | u32 vectoring_info = vmx->idt_vectoring_info; | |
3402 | ||
3403 | trace_kvm_exit(exit_reason, kvm_rip_read(vcpu)); | |
3404 | ||
3405 | /* If we need to emulate an MMIO access found by handle_invalid_guest_state(), | |
3406 | * we just return 0 so that userspace can complete it. */ | |
3407 | if (vmx->emulation_required && emulate_invalid_guest_state) { | |
3408 | if (guest_state_valid(vcpu)) | |
3409 | vmx->emulation_required = 0; | |
3410 | return vmx->invalid_state_emulation_result != EMULATE_DO_MMIO; | |
3411 | } | |
3412 | ||
3413 | /* Accesses to CR3 don't cause a VM exit in paging mode, so we need | |
3414 | * to sync with the guest's real CR3. */ | |
3415 | if (enable_ept && is_paging(vcpu)) | |
3416 | vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); | |
3417 | ||
3418 | if (unlikely(vmx->fail)) { | |
3419 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | |
3420 | kvm_run->fail_entry.hardware_entry_failure_reason | |
3421 | = vmcs_read32(VM_INSTRUCTION_ERROR); | |
3422 | return 0; | |
3423 | } | |
3424 | ||
3425 | if ((vectoring_info & VECTORING_INFO_VALID_MASK) && | |
3426 | (exit_reason != EXIT_REASON_EXCEPTION_NMI && | |
3427 | exit_reason != EXIT_REASON_EPT_VIOLATION && | |
3428 | exit_reason != EXIT_REASON_TASK_SWITCH)) | |
3429 | printk(KERN_WARNING "%s: unexpected, valid vectoring info " | |
3430 | "(0x%x) and exit reason is 0x%x\n", | |
3431 | __func__, vectoring_info, exit_reason); | |
3432 | ||
3433 | if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) { | |
3434 | if (vmx_interrupt_allowed(vcpu)) { | |
3435 | vmx->soft_vnmi_blocked = 0; | |
3436 | } else if (vmx->vnmi_blocked_time > 1000000000LL && | |
3437 | vcpu->arch.nmi_pending) { | |
3438 | /* | |
3439 | * This CPU doesn't help us find the end of an | |
3440 | * NMI-blocked window if the guest runs with IRQs | |
3441 | * disabled. So we pull the trigger after 1 s of | |
3442 | * futile waiting, but inform the user about it. | |
3443 | */ | |
3444 | printk(KERN_WARNING "%s: Breaking out of NMI-blocked " | |
3445 | "state on VCPU %d after 1 s timeout\n", | |
3446 | __func__, vcpu->vcpu_id); | |
3447 | vmx->soft_vnmi_blocked = 0; | |
3448 | } | |
3449 | } | |
3450 | ||
3451 | if (exit_reason < kvm_vmx_max_exit_handlers | |
3452 | && kvm_vmx_exit_handlers[exit_reason]) | |
3453 | return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run); | |
3454 | else { | |
3455 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | |
3456 | kvm_run->hw.hardware_exit_reason = exit_reason; | |
3457 | } | |
3458 | return 0; | |
3459 | } | |
3460 | ||
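| /* The TPR threshold causes a VM exit once the guest lowers its task | |
| * priority below it. A threshold of 0 effectively disables the exit; | |
| * otherwise we arm it so we regain control as soon as the pending | |
| * interrupt becomes deliverable. */ | |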
3461 | static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) | |
3462 | { | |
3463 | if (irr == -1 || tpr < irr) { | |
3464 | vmcs_write32(TPR_THRESHOLD, 0); | |
3465 | return; | |
3466 | } | |
3467 | ||
3468 | vmcs_write32(TPR_THRESHOLD, irr); | |
3469 | } | |
3470 | ||
3471 | static void vmx_complete_interrupts(struct vcpu_vmx *vmx) | |
3472 | { | |
3473 | u32 exit_intr_info; | |
3474 | u32 idt_vectoring_info = vmx->idt_vectoring_info; | |
3475 | bool unblock_nmi; | |
3476 | u8 vector; | |
3477 | int type; | |
3478 | bool idtv_info_valid; | |
3479 | ||
3480 | exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | |
3481 | ||
3482 | vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); | |
3483 | ||
3484 | /* Handle machine checks before interrupts are enabled */ | |
3485 | if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY) | |
3486 | || (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI | |
3487 | && is_machine_check(exit_intr_info))) | |
3488 | kvm_machine_check(); | |
3489 | ||
3490 | /* We need to handle NMIs before interrupts are enabled */ | |
3491 | if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && | |
3492 | (exit_intr_info & INTR_INFO_VALID_MASK)) | |
3493 | asm("int $2"); | |
3494 | ||
3495 | idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; | |
3496 | ||
3497 | if (cpu_has_virtual_nmis()) { | |
3498 | unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; | |
3499 | vector = exit_intr_info & INTR_INFO_VECTOR_MASK; | |
3500 | /* | |
3501 | * SDM 3: 27.7.1.2 (September 2008) | |
3502 | * Re-set bit "block by NMI" before VM entry if the VM exit was | |
3503 | * caused by a guest IRET fault. | |
3504 | * SDM 3: 23.2.2 (September 2008) | |
3505 | * Bit 12 is undefined in any of the following cases: | |
3506 | * If the VM exit sets the valid bit in the IDT-vectoring | |
3507 | * information field. | |
3508 | * If the VM exit is due to a double fault. | |
3509 | */ | |
3510 | if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && | |
3511 | vector != DF_VECTOR && !idtv_info_valid) | |
3512 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, | |
3513 | GUEST_INTR_STATE_NMI); | |
3514 | } else if (unlikely(vmx->soft_vnmi_blocked)) | |
3515 | vmx->vnmi_blocked_time += | |
3516 | ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); | |
3517 | ||
3518 | vmx->vcpu.arch.nmi_injected = false; | |
3519 | kvm_clear_exception_queue(&vmx->vcpu); | |
3520 | kvm_clear_interrupt_queue(&vmx->vcpu); | |
3521 | ||
3522 | if (!idtv_info_valid) | |
3523 | return; | |
3524 | ||
3525 | vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; | |
3526 | type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; | |
3527 | ||
3528 | switch (type) { | |
3529 | case INTR_TYPE_NMI_INTR: | |
3530 | vmx->vcpu.arch.nmi_injected = true; | |
3531 | /* | |
3532 | * SDM 3: 27.7.1.2 (September 2008) | |
3533 | * Clear bit "block by NMI" before VM entry if an NMI | |
3534 | * delivery faulted. | |
3535 | */ | |
3536 | vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, | |
3537 | GUEST_INTR_STATE_NMI); | |
3538 | break; | |
3539 | case INTR_TYPE_SOFT_EXCEPTION: | |
3540 | vmx->vcpu.arch.event_exit_inst_len = | |
3541 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN); | |
3542 | /* fall through */ | |
3543 | case INTR_TYPE_HARD_EXCEPTION: | |
3544 | if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { | |
3545 | u32 err = vmcs_read32(IDT_VECTORING_ERROR_CODE); | |
3546 | kvm_queue_exception_e(&vmx->vcpu, vector, err); | |
3547 | } else | |
3548 | kvm_queue_exception(&vmx->vcpu, vector); | |
3549 | break; | |
3550 | case INTR_TYPE_SOFT_INTR: | |
3551 | vmx->vcpu.arch.event_exit_inst_len = | |
3552 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN); | |
3553 | /* fall through */ | |
3554 | case INTR_TYPE_EXT_INTR: | |
3555 | kvm_queue_interrupt(&vmx->vcpu, vector, | |
3556 | type == INTR_TYPE_SOFT_INTR); | |
3557 | break; | |
3558 | default: | |
3559 | break; | |
3560 | } | |
3561 | } | |
3562 | ||
3563 | /* | |
3564 | * Failure to inject an interrupt should give us the information | |
3565 | * in IDT_VECTORING_INFO_FIELD. However, if the failure occurs | |
3566 | * while fetching the interrupt redirection bitmap in the real-mode | |
3567 | * TSS, this doesn't happen, so we reconstruct the field ourselves. | |
3568 | */ | |
3569 | static void fixup_rmode_irq(struct vcpu_vmx *vmx) | |
3570 | { | |
3571 | vmx->rmode.irq.pending = 0; | |
3572 | if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip) | |
3573 | return; | |
3574 | kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip); | |
3575 | if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) { | |
3576 | vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK; | |
3577 | vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR; | |
3578 | return; | |
3579 | } | |
3580 | vmx->idt_vectoring_info = | |
3581 | VECTORING_INFO_VALID_MASK | |
3582 | | INTR_TYPE_EXT_INTR | |
3583 | | vmx->rmode.irq.vector; | |
3584 | } | |
3585 | ||
3586 | #ifdef CONFIG_X86_64 | |
3587 | #define R "r" | |
3588 | #define Q "q" | |
3589 | #else | |
3590 | #define R "e" | |
3591 | #define Q "l" | |
3592 | #endif | |
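| /* R selects the native register prefix (rax vs. eax) and Q the matching | |
| * push/pop operand-size suffix for the inline assembly below. */ | |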
3593 | ||
3594 | static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
3595 | { | |
3596 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
3597 | ||
3598 | if (enable_ept && is_paging(vcpu)) { | |
3599 | vmcs_writel(GUEST_CR3, vcpu->arch.cr3); | |
3600 | ept_load_pdptrs(vcpu); | |
3601 | } | |
3602 | /* Record the guest's net vcpu time for enforced NMI injections. */ | |
3603 | if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) | |
3604 | vmx->entry_time = ktime_get(); | |
3605 | ||
3606 | /* Handle invalid guest state instead of entering VMX */ | |
3607 | if (vmx->emulation_required && emulate_invalid_guest_state) { | |
3608 | handle_invalid_guest_state(vcpu, kvm_run); | |
3609 | return; | |
3610 | } | |
3611 | ||
3612 | if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) | |
3613 | vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); | |
3614 | if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) | |
3615 | vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); | |
3616 | ||
3617 | /* When single-stepping over STI and MOV SS, we must clear the | |
3618 | * corresponding interruptibility bits in the guest state. Otherwise | |
3619 | * vmentry fails, as it then expects bit 14 (BS) of the pending debug | |
3620 | * exceptions field to be set, but that's not correct for the guest | |
3621 | * debugging case. */ | |
3622 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | |
3623 | vmx_set_interrupt_shadow(vcpu, 0); | |
3624 | ||
3625 | /* | |
3626 | * Loading guest fpu may have cleared host cr0.ts | |
3627 | */ | |
3628 | vmcs_writel(HOST_CR0, read_cr0()); | |
3629 | ||
3630 | set_debugreg(vcpu->arch.dr6, 6); | |
3631 | ||
3632 | asm( | |
3633 | /* Store host registers */ | |
3634 | "push %%"R"dx; push %%"R"bp;" | |
3635 | "push %%"R"cx \n\t" | |
3636 | "cmp %%"R"sp, %c[host_rsp](%0) \n\t" | |
3637 | "je 1f \n\t" | |
3638 | "mov %%"R"sp, %c[host_rsp](%0) \n\t" | |
3639 | __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" | |
3640 | "1: \n\t" | |
3641 | /* Reload cr2 if changed */ | |
3642 | "mov %c[cr2](%0), %%"R"ax \n\t" | |
3643 | "mov %%cr2, %%"R"dx \n\t" | |
3644 | "cmp %%"R"ax, %%"R"dx \n\t" | |
3645 | "je 2f \n\t" | |
3646 | "mov %%"R"ax, %%cr2 \n\t" | |
3647 | "2: \n\t" | |
3648 | /* Check if vmlaunch or vmresume is needed */ | |
3649 | "cmpl $0, %c[launched](%0) \n\t" | |
3650 | /* Load guest registers. Don't clobber flags. */ | |
3651 | "mov %c[rax](%0), %%"R"ax \n\t" | |
3652 | "mov %c[rbx](%0), %%"R"bx \n\t" | |
3653 | "mov %c[rdx](%0), %%"R"dx \n\t" | |
3654 | "mov %c[rsi](%0), %%"R"si \n\t" | |
3655 | "mov %c[rdi](%0), %%"R"di \n\t" | |
3656 | "mov %c[rbp](%0), %%"R"bp \n\t" | |
3657 | #ifdef CONFIG_X86_64 | |
3658 | "mov %c[r8](%0), %%r8 \n\t" | |
3659 | "mov %c[r9](%0), %%r9 \n\t" | |
3660 | "mov %c[r10](%0), %%r10 \n\t" | |
3661 | "mov %c[r11](%0), %%r11 \n\t" | |
3662 | "mov %c[r12](%0), %%r12 \n\t" | |
3663 | "mov %c[r13](%0), %%r13 \n\t" | |
3664 | "mov %c[r14](%0), %%r14 \n\t" | |
3665 | "mov %c[r15](%0), %%r15 \n\t" | |
3666 | #endif | |
3667 | "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */ | |
3668 | ||
3669 | /* Enter guest mode */ | |
3670 | "jne .Llaunched \n\t" | |
3671 | __ex(ASM_VMX_VMLAUNCH) "\n\t" | |
3672 | "jmp .Lkvm_vmx_return \n\t" | |
3673 | ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" | |
3674 | ".Lkvm_vmx_return: " | |
3675 | /* Save guest registers, load host registers, keep flags */ | |
3676 | "xchg %0, (%%"R"sp) \n\t" | |
3677 | "mov %%"R"ax, %c[rax](%0) \n\t" | |
3678 | "mov %%"R"bx, %c[rbx](%0) \n\t" | |
3679 | "push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t" | |
3680 | "mov %%"R"dx, %c[rdx](%0) \n\t" | |
3681 | "mov %%"R"si, %c[rsi](%0) \n\t" | |
3682 | "mov %%"R"di, %c[rdi](%0) \n\t" | |
3683 | "mov %%"R"bp, %c[rbp](%0) \n\t" | |
3684 | #ifdef CONFIG_X86_64 | |
3685 | "mov %%r8, %c[r8](%0) \n\t" | |
3686 | "mov %%r9, %c[r9](%0) \n\t" | |
3687 | "mov %%r10, %c[r10](%0) \n\t" | |
3688 | "mov %%r11, %c[r11](%0) \n\t" | |
3689 | "mov %%r12, %c[r12](%0) \n\t" | |
3690 | "mov %%r13, %c[r13](%0) \n\t" | |
3691 | "mov %%r14, %c[r14](%0) \n\t" | |
3692 | "mov %%r15, %c[r15](%0) \n\t" | |
3693 | #endif | |
3694 | "mov %%cr2, %%"R"ax \n\t" | |
3695 | "mov %%"R"ax, %c[cr2](%0) \n\t" | |
3696 | ||
3697 | "pop %%"R"bp; pop %%"R"bp; pop %%"R"dx \n\t" | |
3698 | "setbe %c[fail](%0) \n\t" | |
3699 | : : "c"(vmx), "d"((unsigned long)HOST_RSP), | |
3700 | [launched]"i"(offsetof(struct vcpu_vmx, launched)), | |
3701 | [fail]"i"(offsetof(struct vcpu_vmx, fail)), | |
3702 | [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), | |
3703 | [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), | |
3704 | [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), | |
3705 | [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), | |
3706 | [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), | |
3707 | [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), | |
3708 | [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), | |
3709 | [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), | |
3710 | #ifdef CONFIG_X86_64 | |
3711 | [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), | |
3712 | [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), | |
3713 | [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), | |
3714 | [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), | |
3715 | [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), | |
3716 | [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), | |
3717 | [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), | |
3718 | [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), | |
3719 | #endif | |
3720 | [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)) | |
3721 | : "cc", "memory" | |
3722 | , R"bx", R"di", R"si" | |
3723 | #ifdef CONFIG_X86_64 | |
3724 | , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" | |
3725 | #endif | |
3726 | ); | |
3727 | ||
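| /* After the exit, RIP, RSP and the PDPTRs are only up to date in the VMCS, | |
| * so mark them unavailable; they are re-read lazily via vmx_cache_reg(). */ | |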
3728 | vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) | |
3729 | | (1 << VCPU_EXREG_PDPTR)); | |
3730 | vcpu->arch.regs_dirty = 0; | |
3731 | ||
3732 | get_debugreg(vcpu->arch.dr6, 6); | |
3733 | ||
3734 | vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); | |
3735 | if (vmx->rmode.irq.pending) | |
3736 | fixup_rmode_irq(vmx); | |
3737 | ||
3738 | asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); | |
3739 | vmx->launched = 1; | |
3740 | ||
3741 | vmx_complete_interrupts(vmx); | |
3742 | } | |
3743 | ||
3744 | #undef R | |
3745 | #undef Q | |
3746 | ||
3747 | static void vmx_free_vmcs(struct kvm_vcpu *vcpu) | |
3748 | { | |
3749 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
3750 | ||
3751 | if (vmx->vmcs) { | |
3752 | vcpu_clear(vmx); | |
3753 | free_vmcs(vmx->vmcs); | |
3754 | vmx->vmcs = NULL; | |
3755 | } | |
3756 | } | |
3757 | ||
3758 | static void vmx_free_vcpu(struct kvm_vcpu *vcpu) | |
3759 | { | |
3760 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
3761 | ||
3762 | spin_lock(&vmx_vpid_lock); | |
3763 | if (vmx->vpid != 0) | |
3764 | __clear_bit(vmx->vpid, vmx_vpid_bitmap); | |
3765 | spin_unlock(&vmx_vpid_lock); | |
3766 | vmx_free_vmcs(vcpu); | |
3767 | kfree(vmx->host_msrs); | |
3768 | kfree(vmx->guest_msrs); | |
3769 | kvm_vcpu_uninit(vcpu); | |
3770 | kmem_cache_free(kvm_vcpu_cache, vmx); | |
3771 | } | |
3772 | ||
3773 | static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) | |
3774 | { | |
3775 | int err; | |
3776 | struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | |
3777 | int cpu; | |
3778 | ||
3779 | if (!vmx) | |
3780 | return ERR_PTR(-ENOMEM); | |
3781 | ||
3782 | allocate_vpid(vmx); | |
3783 | ||
3784 | err = kvm_vcpu_init(&vmx->vcpu, kvm, id); | |
3785 | if (err) | |
3786 | goto free_vcpu; | |
3787 | ||
3788 | vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | |
3789 | if (!vmx->guest_msrs) { | |
3790 | err = -ENOMEM; | |
3791 | goto uninit_vcpu; | |
3792 | } | |
3793 | ||
3794 | vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | |
3795 | if (!vmx->host_msrs) | |
3796 | goto free_guest_msrs; | |
3797 | ||
3798 | vmx->vmcs = alloc_vmcs(); | |
3799 | if (!vmx->vmcs) | |
3800 | goto free_msrs; | |
3801 | ||
3802 | vmcs_clear(vmx->vmcs); | |
3803 | ||
3804 | cpu = get_cpu(); | |
3805 | vmx_vcpu_load(&vmx->vcpu, cpu); | |
3806 | err = vmx_vcpu_setup(vmx); | |
3807 | vmx_vcpu_put(&vmx->vcpu); | |
3808 | put_cpu(); | |
3809 | if (err) | |
3810 | goto free_vmcs; | |
3811 | if (vm_need_virtualize_apic_accesses(kvm)) | |
3812 | if (alloc_apic_access_page(kvm) != 0) | |
3813 | goto free_vmcs; | |
3814 | ||
3815 | if (enable_ept) { | |
3816 | if (!kvm->arch.ept_identity_map_addr) | |
3817 | kvm->arch.ept_identity_map_addr = | |
3818 | VMX_EPT_IDENTITY_PAGETABLE_ADDR; | |
3819 | if (alloc_identity_pagetable(kvm) != 0) | |
3820 | goto free_vmcs; | |
3821 | } | |
3822 | ||
3823 | return &vmx->vcpu; | |
3824 | ||
3825 | free_vmcs: | |
3826 | free_vmcs(vmx->vmcs); | |
3827 | free_msrs: | |
3828 | kfree(vmx->host_msrs); | |
3829 | free_guest_msrs: | |
3830 | kfree(vmx->guest_msrs); | |
3831 | uninit_vcpu: | |
3832 | kvm_vcpu_uninit(&vmx->vcpu); | |
3833 | free_vcpu: | |
3834 | kmem_cache_free(kvm_vcpu_cache, vmx); | |
3835 | return ERR_PTR(err); | |
3836 | } | |
3837 | ||
3838 | static void __init vmx_check_processor_compat(void *rtn) | |
3839 | { | |
3840 | struct vmcs_config vmcs_conf; | |
3841 | ||
3842 | *(int *)rtn = 0; | |
3843 | if (setup_vmcs_config(&vmcs_conf) < 0) | |
3844 | *(int *)rtn = -EIO; | |
3845 | if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { | |
3846 | printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", | |
3847 | smp_processor_id()); | |
3848 | *(int *)rtn = -EIO; | |
3849 | } | |
3850 | } | |
3851 | ||
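| /* The EPT page-walk length is encoded as the number of levels minus one, | |
| * so the default GAW value corresponds to a 4-level EPT page table. */ | |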
3852 | static int get_ept_level(void) | |
3853 | { | |
3854 | return VMX_EPT_DEFAULT_GAW + 1; | |
3855 | } | |
3856 | ||
3857 | static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) | |
3858 | { | |
3859 | u64 ret; | |
3860 | ||
3861 | /* For the combination of VT-d and EPT: | |
3862 | * 1. MMIO: always map as UC. | |
3863 | * 2. EPT with VT-d: | |
3864 | * a. VT-d without the snooping control feature: we can't guarantee | |
3865 | * the result, so try to trust the guest's memory type. | |
3866 | * b. VT-d with the snooping control feature: snooping control in the | |
3867 | * VT-d engine guarantees cache correctness. Just map as WB to stay | |
3868 | * consistent with the host, i.e. the same as item 3. | |
3869 | * 3. EPT without VT-d: always map as WB and set IGMT=1 to stay | |
3870 | * consistent with the host MTRRs. | |
3871 | */ | |
3872 | if (is_mmio) | |
3873 | ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT; | |
3874 | else if (vcpu->kvm->arch.iommu_domain && | |
3875 | !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)) | |
3876 | ret = kvm_get_guest_memory_type(vcpu, gfn) << | |
3877 | VMX_EPT_MT_EPTE_SHIFT; | |
3878 | else | |
3879 | ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | |
3880 | | VMX_EPT_IGMT_BIT; | |
3881 | ||
3882 | return ret; | |
3883 | } | |
3884 | ||
3885 | static const struct trace_print_flags vmx_exit_reasons_str[] = { | |
3886 | { EXIT_REASON_EXCEPTION_NMI, "exception" }, | |
3887 | { EXIT_REASON_EXTERNAL_INTERRUPT, "ext_irq" }, | |
3888 | { EXIT_REASON_TRIPLE_FAULT, "triple_fault" }, | |
3889 | { EXIT_REASON_NMI_WINDOW, "nmi_window" }, | |
3890 | { EXIT_REASON_IO_INSTRUCTION, "io_instruction" }, | |
3891 | { EXIT_REASON_CR_ACCESS, "cr_access" }, | |
3892 | { EXIT_REASON_DR_ACCESS, "dr_access" }, | |
3893 | { EXIT_REASON_CPUID, "cpuid" }, | |
3894 | { EXIT_REASON_MSR_READ, "rdmsr" }, | |
3895 | { EXIT_REASON_MSR_WRITE, "wrmsr" }, | |
3896 | { EXIT_REASON_PENDING_INTERRUPT, "interrupt_window" }, | |
3897 | { EXIT_REASON_HLT, "halt" }, | |
3898 | { EXIT_REASON_INVLPG, "invlpg" }, | |
3899 | { EXIT_REASON_VMCALL, "hypercall" }, | |
3900 | { EXIT_REASON_TPR_BELOW_THRESHOLD, "tpr_below_thres" }, | |
3901 | { EXIT_REASON_APIC_ACCESS, "apic_access" }, | |
3902 | { EXIT_REASON_WBINVD, "wbinvd" }, | |
3903 | { EXIT_REASON_TASK_SWITCH, "task_switch" }, | |
3904 | { EXIT_REASON_EPT_VIOLATION, "ept_violation" }, | |
3905 | { -1, NULL } | |
3906 | }; | |
3907 | ||
3908 | static bool vmx_gb_page_enable(void) | |
3909 | { | |
3910 | return false; | |
3911 | } | |
3912 | ||
3913 | static struct kvm_x86_ops vmx_x86_ops = { | |
3914 | .cpu_has_kvm_support = cpu_has_kvm_support, | |
3915 | .disabled_by_bios = vmx_disabled_by_bios, | |
3916 | .hardware_setup = hardware_setup, | |
3917 | .hardware_unsetup = hardware_unsetup, | |
3918 | .check_processor_compatibility = vmx_check_processor_compat, | |
3919 | .hardware_enable = hardware_enable, | |
3920 | .hardware_disable = hardware_disable, | |
3921 | .cpu_has_accelerated_tpr = report_flexpriority, | |
3922 | ||
3923 | .vcpu_create = vmx_create_vcpu, | |
3924 | .vcpu_free = vmx_free_vcpu, | |
3925 | .vcpu_reset = vmx_vcpu_reset, | |
3926 | ||
3927 | .prepare_guest_switch = vmx_save_host_state, | |
3928 | .vcpu_load = vmx_vcpu_load, | |
3929 | .vcpu_put = vmx_vcpu_put, | |
3930 | ||
3931 | .set_guest_debug = set_guest_debug, | |
3932 | .get_msr = vmx_get_msr, | |
3933 | .set_msr = vmx_set_msr, | |
3934 | .get_segment_base = vmx_get_segment_base, | |
3935 | .get_segment = vmx_get_segment, | |
3936 | .set_segment = vmx_set_segment, | |
3937 | .get_cpl = vmx_get_cpl, | |
3938 | .get_cs_db_l_bits = vmx_get_cs_db_l_bits, | |
3939 | .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, | |
3940 | .set_cr0 = vmx_set_cr0, | |
3941 | .set_cr3 = vmx_set_cr3, | |
3942 | .set_cr4 = vmx_set_cr4, | |
3943 | .set_efer = vmx_set_efer, | |
3944 | .get_idt = vmx_get_idt, | |
3945 | .set_idt = vmx_set_idt, | |
3946 | .get_gdt = vmx_get_gdt, | |
3947 | .set_gdt = vmx_set_gdt, | |
3948 | .cache_reg = vmx_cache_reg, | |
3949 | .get_rflags = vmx_get_rflags, | |
3950 | .set_rflags = vmx_set_rflags, | |
3951 | ||
3952 | .tlb_flush = vmx_flush_tlb, | |
3953 | ||
3954 | .run = vmx_vcpu_run, | |
3955 | .handle_exit = vmx_handle_exit, | |
3956 | .skip_emulated_instruction = skip_emulated_instruction, | |
3957 | .set_interrupt_shadow = vmx_set_interrupt_shadow, | |
3958 | .get_interrupt_shadow = vmx_get_interrupt_shadow, | |
3959 | .patch_hypercall = vmx_patch_hypercall, | |
3960 | .set_irq = vmx_inject_irq, | |
3961 | .set_nmi = vmx_inject_nmi, | |
3962 | .queue_exception = vmx_queue_exception, | |
3963 | .interrupt_allowed = vmx_interrupt_allowed, | |
3964 | .nmi_allowed = vmx_nmi_allowed, | |
3965 | .enable_nmi_window = enable_nmi_window, | |
3966 | .enable_irq_window = enable_irq_window, | |
3967 | .update_cr8_intercept = update_cr8_intercept, | |
3968 | ||
3969 | .set_tss_addr = vmx_set_tss_addr, | |
3970 | .get_tdp_level = get_ept_level, | |
3971 | .get_mt_mask = vmx_get_mt_mask, | |
3972 | ||
3973 | .exit_reasons_str = vmx_exit_reasons_str, | |
3974 | .gb_page_enable = vmx_gb_page_enable, | |
3975 | }; | |
3976 | ||
3977 | static int __init vmx_init(void) | |
3978 | { | |
3979 | int r; | |
3980 | ||
3981 | vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL); | |
3982 | if (!vmx_io_bitmap_a) | |
3983 | return -ENOMEM; | |
3984 | ||
3985 | vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL); | |
3986 | if (!vmx_io_bitmap_b) { | |
3987 | r = -ENOMEM; | |
3988 | goto out; | |
3989 | } | |
3990 | ||
3991 | vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL); | |
3992 | if (!vmx_msr_bitmap_legacy) { | |
3993 | r = -ENOMEM; | |
3994 | goto out1; | |
3995 | } | |
3996 | ||
3997 | vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL); | |
3998 | if (!vmx_msr_bitmap_longmode) { | |
3999 | r = -ENOMEM; | |
4000 | goto out2; | |
4001 | } | |
4002 | ||
4003 | /* | |
4004 | * Allow direct access to the PC debug port (it is often used for I/O | |
4005 | * delays, but the vmexits simply slow things down). | |
4006 | */ | |
4007 | memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); | |
4008 | clear_bit(0x80, vmx_io_bitmap_a); | |
4009 | ||
4010 | memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); | |
4011 | ||
4012 | memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); | |
4013 | memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); | |
4014 | ||
4015 | set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ | |
4016 | ||
4017 | r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE); | |
4018 | if (r) | |
4019 | goto out3; | |
4020 | ||
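| /* These MSRs are context-switched for the guest anyway, so intercepting | |
| * them buys nothing; disable the intercepts to avoid a VM exit on every | |
| * guest access (MSR_KERNEL_GS_BASE is passed through in long mode only). */ | |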
4021 | vmx_disable_intercept_for_msr(MSR_FS_BASE, false); | |
4022 | vmx_disable_intercept_for_msr(MSR_GS_BASE, false); | |
4023 | vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); | |
4024 | vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); | |
4025 | vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); | |
4026 | vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); | |
4027 | ||
4028 | if (enable_ept) { | |
4029 | bypass_guest_pf = 0; | |
4030 | kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | | |
4031 | VMX_EPT_WRITABLE_MASK); | |
4032 | kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, | |
4033 | VMX_EPT_EXECUTABLE_MASK); | |
4034 | kvm_enable_tdp(); | |
4035 | } else | |
4036 | kvm_disable_tdp(); | |
4037 | ||
4038 | if (bypass_guest_pf) | |
4039 | kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); | |
4040 | ||
4041 | ept_sync_global(); | |
4042 | ||
4043 | return 0; | |
4044 | ||
4045 | out3: | |
4046 | free_page((unsigned long)vmx_msr_bitmap_longmode); | |
4047 | out2: | |
4048 | free_page((unsigned long)vmx_msr_bitmap_legacy); | |
4049 | out1: | |
4050 | free_page((unsigned long)vmx_io_bitmap_b); | |
4051 | out: | |
4052 | free_page((unsigned long)vmx_io_bitmap_a); | |
4053 | return r; | |
4054 | } | |
4055 | ||
4056 | static void __exit vmx_exit(void) | |
4057 | { | |
4058 | free_page((unsigned long)vmx_msr_bitmap_legacy); | |
4059 | free_page((unsigned long)vmx_msr_bitmap_longmode); | |
4060 | free_page((unsigned long)vmx_io_bitmap_b); | |
4061 | free_page((unsigned long)vmx_io_bitmap_a); | |
4062 | ||
4063 | kvm_exit(); | |
4064 | } | |
4065 | ||
4066 | module_init(vmx_init) | |
4067 | module_exit(vmx_exit) |