/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>

#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>

#include <asm/desc.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

/* Turn on to get debugging output */
/* #define NESTED_DEBUG */

#ifdef NESTED_DEBUG
#define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args)
#else
#define nsvm_printk(fmt, args...) do {} while (0)
#endif
static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	u64 host_gs_base;

	u32 *msrpm;
	struct vmcb *hsave;
	u64 hsave_msr;

	u64 nested_vmcb;

	/* These are the merged vectors */
	u32 *nested_msrpm;

	/* gpa pointers to the real vectors */
	u64 nested_vmcb_msrpm;
};
/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled = false;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static int nested = 0;
module_param(nested, int, S_IRUGO);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);

static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
			     void *arg2, void *opaque);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline bool is_nested(struct vcpu_svm *svm)
{
	return svm->nested_vmcb;
}
static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
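/*
 * Note: GIF is AMD's global interrupt flag; while it is clear the CPU
 * holds off all interrupts, NMIs and SMIs.  The helpers above only
 * cache the guest's view of GIF in vcpu.arch.hflags, so that the
 * stgi/clgi intercepts and the nested-SVM code can consult it without
 * touching hardware state.
 */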
static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15
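/*
 * MSR permission map geometry: each of the three msrpm_ranges covers
 * MSRS_IN_RANGE (2048 * 8 / 2 = 8192) MSRs, and every MSR gets two
 * bits in the map - a read-intercept bit followed by a write-intercept
 * bit.  For example, MSR_LSTAR (0xc0000084) falls in range 1, so its
 * bit offset is (8192 + 0x84) * 2 = 16648: u32 index 520, shift 8.
 */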
static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	vcpu->arch.shadow_efer = efer;
}
static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* If we are within a nested VM we'd better #VMEXIT and let the
	   guest handle the exception */
	if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}
static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}
static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
	return ret & mask;
}
static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, vcpu->run, 0, 0, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}
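/*
 * Each intercept handler that knows the length of the trapping
 * instruction fills in svm->next_rip before calling this helper (see
 * e.g. io_interception, which takes it from exit_info_2).  If next_rip
 * was not set, fall back to the x86 emulator with EMULTYPE_SKIP so
 * that instruction decode alone advances RIP.
 */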
static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}
static void svm_hardware_disable(void *garbage)
{
	cpu_svm_disable();
}
static void svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *svm_data;
	uint64_t efer;
	struct descriptor_table gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;

	kvm_get_gdt(&gdt_descr);
	gdt = (struct desc_struct *)gdt_descr.base;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}
static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (!svm_data)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(svm_data->save_area);
	kfree(svm_data);
}
static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;
}
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return;
		}
	}
	BUG();
}
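/*
 * A cleared bit pair means the guest may access the MSR directly; a
 * set bit forces a #VMEXIT.  So set_msr_interception(msrpm, msr, 1, 1)
 * passes the MSR through (read = 1, write = 1 gives mask 0, clearing
 * both bits), while (msrpm, msr, 0, 0) re-arms interception for it.
 */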
static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
}
static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}
static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}
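/*
 * With lbr_ctl set, the CPU saves and restores the guest's last-branch
 * record MSRs in the VMCB on every world switch, so the pass-through
 * in svm_enable_lbrv is safe; svm_disable_lbrv re-intercepts the same
 * MSRs once LBR virtualization is turned off again.
 */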
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME);
	}

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	if (!svm_has(SVM_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled)
		printk(KERN_INFO "kvm: Nested Paging enabled\n");

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}
static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}
static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}
static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	control->intercept_cr_read =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK |
					INTERCEPT_CR8_MASK;

	control->intercept_cr_write =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK |
					INTERCEPT_CR8_MASK;

	control->intercept_dr_read =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK;

	control->intercept_dr_write =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK |
					INTERCEPT_DR5_MASK |
					INTERCEPT_DR7_MASK;

	control->intercept_exceptions = (1 << PF_VECTOR) |
					(1 << UD_VECTOR) |
					(1 << MC_VECTOR);

	control->intercept =	(1ULL << INTERCEPT_INTR) |
				(1ULL << INTERCEPT_NMI) |
				(1ULL << INTERCEPT_SMI) |
				(1ULL << INTERCEPT_CPUID) |
				(1ULL << INTERCEPT_INVD) |
				(1ULL << INTERCEPT_HLT) |
				(1ULL << INTERCEPT_INVLPG) |
				(1ULL << INTERCEPT_INVLPGA) |
				(1ULL << INTERCEPT_IOIO_PROT) |
				(1ULL << INTERCEPT_MSR_PROT) |
				(1ULL << INTERCEPT_TASK_SWITCH) |
				(1ULL << INTERCEPT_SHUTDOWN) |
				(1ULL << INTERCEPT_VMRUN) |
				(1ULL << INTERCEPT_VMMCALL) |
				(1ULL << INTERCEPT_VMLOAD) |
				(1ULL << INTERCEPT_VMSAVE) |
				(1ULL << INTERCEPT_STGI) |
				(1ULL << INTERCEPT_CLGI) |
				(1ULL << INTERCEPT_SKINIT) |
				(1ULL << INTERCEPT_WBINVD) |
				(1ULL << INTERCEPT_MONITOR) |
				(1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = EFER_SVME;
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * cr0 val on cpu init should be 0x60000010, we enable cpu
	 * cache by default. the orderly way is to enable cache in bios.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
					(1ULL << INTERCEPT_INVLPG));
		control->intercept_exceptions &= ~(1 << PF_VECTOR);
		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK |
						INTERCEPT_CR3_MASK);
		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK |
						 INTERCEPT_CR3_MASK);
		save->g_pat = 0x0007040600070406ULL;
		/* enable caching because the QEMU Bios doesn't enable it */
		save->cr0 = X86_CR0_ET;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	force_new_asid(&svm->vcpu);

	svm->nested_vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	enable_gif(svm);
}
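/*
 * In the nested-paging branch above, the CR3 and page-fault intercepts
 * can be dropped because the hardware walks the guest page tables
 * itself: faults are reported as #NPF exits against the nested page
 * tables instead of as guest #PF.
 */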
static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm);

	if (!kvm_vcpu_is_bsp(vcpu)) {
		kvm_rip_write(vcpu, 0);
		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
	}
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	return 0;
}
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto uninit;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto uninit;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto uninit;

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	hsave_page = alloc_page(GFP_KERNEL);
	if (!hsave_page)
		goto uninit;
	svm->hsave = page_address(hsave_page);

	svm->nested_msrpm = page_address(nested_msrpm_pages);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	init_vmcb(svm);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (kvm_vcpu_is_bsp(&svm->vcpu))
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}
static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->hsave));
	__free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->arch.host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
		kvm_migrate_timers(vcpu);
		svm->asid_generation = 0;
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->arch.host_tsc);
}
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		BUG_ON(!npt_enabled);
		load_pdptrs(vcpu, vcpu->arch.cr3);
		break;
	default:
		BUG();
	}
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

	/* AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross vendor migration purposes by "not present"
	 */
	var->unusable = !var->present || (var->type == 0);

	switch (seg) {
	case VCPU_SREG_CS:
		/*
		 * SVM always stores 0 for the 'G' bit in the CS selector in
		 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
		 * Intel's VMENTRY has a check on the 'G' bit.
		 */
		var->g = s->limit > 0xfffff;
		break;
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/* On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		break;
	}
}
static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}
static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}
static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	if (npt_enabled)
		goto set;

	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->arch.cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	if (!vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		cr0 |= X86_CR0_TS;
	}
set:
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}
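/*
 * Lazy FPU switching: while fpu_active is clear, CR0.TS is forced on
 * and #NM is intercepted, so the first guest FPU access traps to
 * nm_interception below, which clears TS, drops the intercept and
 * marks the FPU active again.
 */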
static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		force_new_asid(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}
static void update_db_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.intercept_exceptions &=
		~((1 << DB_VECTOR) | (1 << BP_VECTOR));

	if (vcpu->arch.singlestep)
		svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);

	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug &
		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			svm->vmcb->control.intercept_exceptions |=
				1 << DB_VECTOR;
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			svm->vmcb->control.intercept_exceptions |=
				1 << BP_VECTOR;
	} else
		vcpu->guest_debug = 0;
}
static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
	int old_debug = vcpu->guest_debug;
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->guest_debug = dbg->control;

	update_db_intercept(vcpu);

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
	else
		svm->vmcb->save.dr7 = vcpu->arch.dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
	else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
		svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	return 0;
}
static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}
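/*
 * ASIDs tag TLB entries per guest, avoiding a TLB flush on every world
 * switch.  Example: with max_asid = 63, the 64th allocation rolls
 * next_asid over, bumps the per-cpu generation and requests a full
 * ASID flush; pre_svm_run() then re-runs new_asid() for any vcpu whose
 * cached generation no longer matches (see also force_new_asid()).
 */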
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long val;

	switch (dr) {
	case 0 ... 3:
		val = vcpu->arch.db[dr];
		break;
	case 6:
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			val = vcpu->arch.dr6;
		else
			val = svm->vmcb->save.dr6;
		break;
	case 7:
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			val = vcpu->arch.dr7;
		else
			val = svm->vmcb->save.dr7;
		break;
	default:
		val = 0;
	}

	return val;
}
static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	*exception = 0;

	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = value;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->arch.cr4 & X86_CR4_DE)
			*exception = UD_VECTOR;
		return;
	case 6:
		if (value & 0xffffffff00000000ULL) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
		return;
	case 7:
		if (value & 0xffffffff00000000ULL) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
			svm->vmcb->save.dr7 = vcpu->arch.dr7;
			vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
		}
		return;
	default:
		/* FIXME: Possible case? */
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __func__, dr);
		*exception = UD_VECTOR;
		return;
	}
}
static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u64 fault_address;
	u32 error_code;

	fault_address  = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;

	trace_kvm_page_fault(fault_address, error_code);
	/*
	 * FIXME: This shouldn't be necessary here, but there is a flush
	 * missing in the MMU code. Until we find this bug, flush the
	 * complete TLB here on an NPF
	 */
	if (npt_enabled)
		svm_flush_tlb(&svm->vcpu);
	else {
		if (kvm_event_needs_reinjection(&svm->vcpu))
			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
	}
	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}
static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (!(svm->vcpu.guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
		!svm->vcpu.arch.singlestep) {
		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
		return 1;
	}

	if (svm->vcpu.arch.singlestep) {
		svm->vcpu.arch.singlestep = false;
		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
			svm->vmcb->save.rflags &=
				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		update_db_intercept(&svm->vcpu);
	}

	if (svm->vcpu.guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		return 0;
	}

	return 1;
}
static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}
static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	int er;

	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}
static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

	return 1;
}
static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return 1;
}
static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}
*svm
, struct kvm_run
*kvm_run
)
1273 u32 io_info
= svm
->vmcb
->control
.exit_info_1
; /* address size bug? */
1274 int size
, in
, string
;
1277 ++svm
->vcpu
.stat
.io_exits
;
1279 svm
->next_rip
= svm
->vmcb
->control
.exit_info_2
;
1281 string
= (io_info
& SVM_IOIO_STR_MASK
) != 0;
1284 if (emulate_instruction(&svm
->vcpu
,
1285 kvm_run
, 0, 0, 0) == EMULATE_DO_MMIO
)
1290 in
= (io_info
& SVM_IOIO_TYPE_MASK
) != 0;
1291 port
= io_info
>> 16;
1292 size
= (io_info
& SVM_IOIO_SIZE_MASK
) >> SVM_IOIO_SIZE_SHIFT
;
1294 skip_emulated_instruction(&svm
->vcpu
);
1295 return kvm_emulate_pio(&svm
->vcpu
, kvm_run
, in
, size
, port
);
static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}
static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	++svm->vcpu.stat.irq_exits;
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}
static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}
static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}
static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
	    || !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code)
{
	if (is_nested(svm)) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = error_code;
		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
		if (nested_svm_exit_handled(svm, false)) {
			nsvm_printk("VMexit -> EXCP 0x%x\n", nr);

			nested_svm_vmexit(svm);

			return 1;
		}
	}

	return 0;
}
static inline int nested_svm_intr(struct vcpu_svm *svm)
{
	if (is_nested(svm)) {
		if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
			return 0;

		if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
			return 0;

		svm->vmcb->control.exit_code = SVM_EXIT_INTR;

		if (nested_svm_exit_handled(svm, false)) {
			nsvm_printk("VMexit -> INTR\n");
			nested_svm_vmexit(svm);
			return 1;
		}
	}

	return 0;
}
static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	if (is_error_page(page)) {
		printk(KERN_INFO "%s: could not find page at 0x%llx\n",
		       __func__, gpa);
		kvm_release_page_clean(page);
		kvm_inject_gp(&svm->vcpu, 0);
		return NULL;
	}
	return page;
}
static int nested_svm_do(struct vcpu_svm *svm,
			 u64 arg1_gpa, u64 arg2_gpa, void *opaque,
			 int (*handler)(struct vcpu_svm *svm,
					void *arg1,
					void *arg2,
					void *opaque))
{
	struct page *arg1_page;
	struct page *arg2_page = NULL;
	void *arg1 = NULL;
	void *arg2 = NULL;
	int retval;

	arg1_page = nested_svm_get_page(svm, arg1_gpa);
	if (arg1_page == NULL)
		return 1;

	if (arg2_gpa) {
		arg2_page = nested_svm_get_page(svm, arg2_gpa);
		if (arg2_page == NULL) {
			kvm_release_page_clean(arg1_page);
			return 1;
		}
	}

	arg1 = kmap_atomic(arg1_page, KM_USER0);
	if (arg2_gpa)
		arg2 = kmap_atomic(arg2_page, KM_USER1);

	retval = handler(svm, arg1, arg2, opaque);

	kunmap_atomic(arg1, KM_USER0);
	if (arg2_gpa)
		kunmap_atomic(arg2, KM_USER1);

	kvm_release_page_dirty(arg1_page);
	if (arg2_gpa)
		kvm_release_page_dirty(arg2_page);

	return retval;
}
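/*
 * nested_svm_do() is the common map/run/unmap pattern for the nested
 * SVM helpers: guest-physical addresses of one or two VMCB-sized
 * objects are resolved to pages, kmapped, handed to the handler
 * callback and released dirty afterwards.  Because the mappings are
 * atomic, the handlers must not sleep.
 */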
static int nested_svm_exit_handled_real(struct vcpu_svm *svm,
					void *arg1,
					void *arg2,
					void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	bool kvm_overrides = *(bool *)opaque;
	u32 exit_code = svm->vmcb->control.exit_code;

	if (kvm_overrides) {
		switch (exit_code) {
		case SVM_EXIT_INTR:
		case SVM_EXIT_NMI:
			return 0;
		/* For now we are always handling NPFs when using them */
		case SVM_EXIT_NPF:
			if (npt_enabled)
				return 0;
			break;
		/* When we're shadowing, trap PFs */
		case SVM_EXIT_EXCP_BASE + PF_VECTOR:
			if (!npt_enabled)
				return 0;
			break;
		default:
			break;
		}
	}

	switch (exit_code) {
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
		u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
		if (nested_vmcb->control.intercept_cr_read & cr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
		if (nested_vmcb->control.intercept_cr_write & cr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
		u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
		if (nested_vmcb->control.intercept_dr_read & dr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
		if (nested_vmcb->control.intercept_dr_write & dr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
		if (nested_vmcb->control.intercept_exceptions & excp_bits)
			return 1;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		nsvm_printk("exit code: 0x%x\n", exit_code);
		if (nested_vmcb->control.intercept & exit_bits)
			return 1;
	}
	}

	return 0;
}
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
				       void *arg1, void *arg2,
				       void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	u8 *msrpm = (u8 *)arg2;
	u32 t0, t1;
	u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u32 param = svm->vmcb->control.exit_info_1 & 1;

	if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return 0;

	switch (msr) {
	case 0 ... 0x1fff:
		t0 = (msr * 2) % 8;
		t1 = msr / 8;
		break;
	case 0xc0000000 ... 0xc0001fff:
		t0 = (8192 + msr - 0xc0000000) * 2;
		t1 = (t0 / 8);
		t0 %= 8;
		break;
	case 0xc0010000 ... 0xc0011fff:
		t0 = (16384 + msr - 0xc0010000) * 2;
		t1 = (t0 / 8);
		t0 %= 8;
		break;
	default:
		return 1;
	}
	if (msrpm[t1] & ((1 << param) << t0))
		return 1;

	return 0;
}
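/*
 * Worked example for the offset math above: for MSR 0xc0000080 (EFER),
 * t0 = (8192 + 0x80) * 2 = 16640 bits, so t1 = 16640 / 8 = byte 2080
 * and t0 %= 8 gives bit 0.  param selects read (0) or write (1), i.e.
 * the first or second bit of the MSR's two-bit field in the L1 map.
 */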
static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
{
	bool k = kvm_override;

	switch (svm->vmcb->control.exit_code) {
	case SVM_EXIT_MSR:
		return nested_svm_do(svm, svm->nested_vmcb,
				     svm->nested_vmcb_msrpm, NULL,
				     nested_svm_exit_handled_msr);
	default:
		break;
	}

	return nested_svm_do(svm, svm->nested_vmcb, 0, &k,
			     nested_svm_exit_handled_real);
}
static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
	struct vmcb_control_area *dst  = &dst_vmcb->control;
	struct vmcb_control_area *from = &from_vmcb->control;

	dst->intercept_cr_read    = from->intercept_cr_read;
	dst->intercept_cr_write   = from->intercept_cr_write;
	dst->intercept_dr_read    = from->intercept_dr_read;
	dst->intercept_dr_write   = from->intercept_dr_write;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept            = from->intercept;
	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	dst->asid                 = from->asid;
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->lbr_ctl              = from->lbr_ctl;
}
static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
				  void *arg2, void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	struct vmcb *hsave = svm->hsave;
	struct vmcb *vmcb = svm->vmcb;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es = vmcb->save.es;
	nested_vmcb->save.cs = vmcb->save.cs;
	nested_vmcb->save.ss = vmcb->save.ss;
	nested_vmcb->save.ds = vmcb->save.ds;
	nested_vmcb->save.gdtr = vmcb->save.gdtr;
	nested_vmcb->save.idtr = vmcb->save.idtr;
	if (npt_enabled)
		nested_vmcb->save.cr3 = vmcb->save.cr3;
	nested_vmcb->save.cr2 = vmcb->save.cr2;
	nested_vmcb->save.rflags = vmcb->save.rflags;
	nested_vmcb->save.rip = vmcb->save.rip;
	nested_vmcb->save.rsp = vmcb->save.rsp;
	nested_vmcb->save.rax = vmcb->save.rax;
	nested_vmcb->save.dr7 = vmcb->save.dr7;
	nested_vmcb->save.dr6 = vmcb->save.dr6;
	nested_vmcb->save.cpl = vmcb->save.cpl;

	nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector = vmcb->control.int_vector;
	nested_vmcb->control.int_state = vmcb->control.int_state;
	nested_vmcb->control.exit_code = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
	nested_vmcb->control.tlb_ctl = 0;
	nested_vmcb->control.event_inj = 0;
	nested_vmcb->control.event_inj_err = 0;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(vmcb, hsave);

	/* Kill any pending exceptions */
	if (svm->vcpu.arch.exception.pending == true)
		nsvm_printk("WARNING: Pending Exception\n");

	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	svm->vmcb->save.rflags = hsave->save.rflags;
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	/* Exit nested SVM mode */
	svm->nested_vmcb = 0;

	return 0;
}
static int nested_svm_vmexit(struct vcpu_svm *svm)
{
	nsvm_printk("VMexit\n");
	if (nested_svm_do(svm, svm->nested_vmcb, 0,
			  NULL, nested_svm_vmexit_real))
		return 1;

	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	return 0;
}
static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1,
				  void *arg2, void *opaque)
{
	int i;
	u32 *nested_msrpm = (u32 *)arg1;

	for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
		svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm);

	return 0;
}
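/*
 * The merged map is the bitwise OR of KVM's own msrpm and the map the
 * L1 hypervisor supplied, so an MSR access exits if *either* KVM or
 * the nested hypervisor wants to intercept it - the nested guest can
 * never be given more MSR access than L1 itself has.
 */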
static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
			    void *arg2, void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	struct vmcb *hsave = svm->hsave;
	struct vmcb *vmcb = svm->vmcb;

	/* nested_vmcb is our indicator if nested SVM is activated */
	svm->nested_vmcb = svm->vmcb->save.rax;

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/* Save the old vmcb, so we don't need to pick what we save, but
	   can restore everything when a VMEXIT occurs */
	hsave->save.es = vmcb->save.es;
	hsave->save.cs = vmcb->save.cs;
	hsave->save.ss = vmcb->save.ss;
	hsave->save.ds = vmcb->save.ds;
	hsave->save.gdtr = vmcb->save.gdtr;
	hsave->save.idtr = vmcb->save.idtr;
	hsave->save.efer = svm->vcpu.arch.shadow_efer;
	hsave->save.cr0 = svm->vcpu.arch.cr0;
	hsave->save.cr4 = svm->vcpu.arch.cr4;
	hsave->save.rflags = vmcb->save.rflags;
	hsave->save.rip = svm->next_rip;
	hsave->save.rsp = vmcb->save.rsp;
	hsave->save.rax = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = svm->vcpu.arch.cr3;

	copy_vmcb_control_area(hsave, vmcb);

	if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	svm->vmcb->save.rflags = nested_vmcb->save.rflags;
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
	} else {
		kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
		kvm_mmu_reset_context(&svm->vcpu);
	}
	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;

	/* We don't want a nested guest to be more powerful than the guest,
	   so all intercepts are ORed */
	svm->vmcb->control.intercept_cr_read |=
		nested_vmcb->control.intercept_cr_read;
	svm->vmcb->control.intercept_cr_write |=
		nested_vmcb->control.intercept_cr_write;
	svm->vmcb->control.intercept_dr_read |=
		nested_vmcb->control.intercept_dr_read;
	svm->vmcb->control.intercept_dr_write |=
		nested_vmcb->control.intercept_dr_write;
	svm->vmcb->control.intercept_exceptions |=
		nested_vmcb->control.intercept_exceptions;

	svm->vmcb->control.intercept |= nested_vmcb->control.intercept;

	svm->nested_vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;

	force_new_asid(&svm->vcpu);
	svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info;
	svm->vmcb->control.exit_int_info_err = nested_vmcb->control.exit_int_info_err;
	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	if (nested_vmcb->control.int_ctl & V_IRQ_MASK) {
		nsvm_printk("nSVM Injecting Interrupt: 0x%x\n",
				nested_vmcb->control.int_ctl);
	}
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	nsvm_printk("nSVM exit_int_info: 0x%x | int_state: 0x%x\n",
			nested_vmcb->control.exit_int_info,
			nested_vmcb->control.int_state);

	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
	if (nested_vmcb->control.event_inj & SVM_EVTINJ_VALID)
		nsvm_printk("Injecting Event: 0x%x\n",
				nested_vmcb->control.event_inj);
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	enable_gif(svm);

	return 0;
}
static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;

	return 1;
}
static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb,
			     void *arg2, void *opaque)
{
	return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb);
}

static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
			     void *arg2, void *opaque)
{
	return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb);
}
static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload);

	return 1;
}
static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave);

	return 1;
}
static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	nsvm_printk("VMrun\n");
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	if (nested_svm_do(svm, svm->vmcb->save.rax, 0,
			  NULL, nested_svm_vmrun))
		return 1;

	if (nested_svm_do(svm, svm->nested_vmcb_msrpm, 0,
			  NULL, nested_svm_vmrun_msrpm))
		return 1;

	return 1;
}
static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	enable_gif(svm);

	return 1;
}
static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	disable_gif(svm);

	/* After a CLGI no interrupts should come */
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;

	return 1;
}
static int invlpga_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	nsvm_printk("INVLPGA\n");

	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
	kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	return 1;
}
static int invalid_op_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}
static int task_switch_interception(struct vcpu_svm *svm,
				    struct kvm_run *kvm_run)
{
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			svm->vcpu.arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			kvm_clear_exception_queue(&svm->vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
			kvm_clear_interrupt_queue(&svm->vcpu);
			break;
		default:
			break;
		}
	}

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
		skip_emulated_instruction(&svm->vcpu);

	return kvm_task_switch(&svm->vcpu, tss_selector, reason);
}
static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}
static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	++svm->vcpu.stat.nmi_window_exits;
	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
	svm->vcpu.arch.hflags |= HF_IRET_MASK;
	return 1;
}
static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}
static int emulate_on_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}
static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
	/* instruction emulation calls kvm_set_cr8() */
	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
	if (irqchip_in_kernel(svm->vcpu.kvm)) {
		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
		return 1;
	}
	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
		return 1;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC: {
		u64 tsc;

		rdtscll(tsc);
		*data = svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->sysenter_esp;
		break;
	/* Nobody will change the following 5 values in the VMCB so
	   we can safely return them on rdmsr. They will always be 0
	   until LBRV is implemented. */
	case MSR_IA32_DEBUGCTLMSR:
		*data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		*data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		*data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		*data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		*data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		*data = svm->hsave_msr;
		break;
	case MSR_VM_CR:
		*data = 0;
		break;
	case MSR_IA32_UCODE_REV:
		*data = 0x01000065;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}
static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data))
		kvm_inject_gp(&svm->vcpu, 0);
	else {
		trace_kvm_msr_read(ecx, data);

		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC: {
		u64 tsc;

		rdtscll(tsc);
		svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->sysenter_eip = data;
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->sysenter_esp = data;
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!svm_has(SVM_FEATURE_LBRV)) {
			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
					__func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_VM_HSAVE_PA:
		svm->hsave_msr = data;
		break;
	case MSR_VM_CR:
	case MSR_VM_IGNNE:
		pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}
static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	trace_kvm_msr_write(ecx, data);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data))
		kvm_inject_gp(&svm->vcpu, 0);
	else
		skip_emulated_instruction(&svm->vcpu);
	return 1;
}
static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm, kvm_run);
	else
		return rdmsr_interception(svm, kvm_run);
}
static int interrupt_window_interception(struct vcpu_svm *svm,
					 struct kvm_run *kvm_run)
{
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (!irqchip_in_kernel(svm->vcpu.kvm) &&
	    kvm_run->request_interrupt_window &&
	    !kvm_cpu_has_interrupt(&svm->vcpu)) {
		++svm->vcpu.stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_IRET]				= iret_interception,
	[SVM_EXIT_INVD]				= emulate_on_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invlpga_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= invalid_op_interception,
	[SVM_EXIT_WBINVD]			= emulate_on_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
	[SVM_EXIT_NPF]				= pf_interception,
};
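
/*
 * Top-level #VMEXIT dispatcher. Exits that occur while a nested guest is
 * running are offered to the L1 hypervisor first; with NPT enabled, CR0
 * and CR3 are synced back from the VMCB before the exit code is looked
 * up in svm_exit_handlers[].
 */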

static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_code = svm->vmcb->control.exit_code;

	trace_kvm_exit(exit_code, svm->vmcb->save.rip);

	if (is_nested(svm)) {
		nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
			    exit_code, svm->vmcb->control.exit_info_1,
			    svm->vmcb->control.exit_info_2, svm->vmcb->save.rip);
		if (nested_svm_exit_handled(svm, true)) {
			nested_svm_vmexit(svm);
			nsvm_printk("-> #VMEXIT\n");
			return 1;
		}
	}

	if (npt_enabled) {
		int mmu_reload = 0;
		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
			mmu_reload = 1;
		}
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
		vcpu->arch.cr3 = svm->vmcb->save.cr3;
		if (mmu_reload) {
			kvm_mmu_reset_context(vcpu);
			kvm_mmu_load(vcpu);
		}
	}

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm, kvm_run);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != svm_data->asid_generation)
		new_asid(svm, svm_data);
}

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	vcpu->arch.hflags |= HF_NMI_MASK;
	svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}
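
/*
 * Request injection of a virtual interrupt through the VMCB V_IRQ
 * fields. The priority is not derived from the vector (note the
 * commented-out expression below) but pinned at the maximum, 0xf, so
 * that delivery is not filtered by the guest's current V_TPR.
 */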

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	trace_kvm_inj_virq(irq);

	++svm->vcpu.stat.irq_injections;
	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	BUG_ON(!(gif_set(svm)));

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (irr == -1)
		return;

	if (tpr >= irr)
		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
}

static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}
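
/*
 * An interrupt can be delivered only when the guest's EFLAGS.IF is set,
 * no interrupt shadow is in effect, and the global interrupt flag (GIF)
 * allows it.
 */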

static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	return (vmcb->save.rflags & X86_EFLAGS_IF) &&
		!(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		gif_set(svm) &&
		!(is_nested(svm) && (svm->vcpu.arch.hflags & HF_VINTR_MASK));
}

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	nsvm_printk("Trying to open IRQ window\n");

	nested_svm_intr(svm);

	/* In case GIF=0 we can't rely on the CPU to tell us when
	 * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
	 * The next time we get that intercept, this function will be
	 * called again though and we'll get the vintr intercept. */
	if (gif_set(svm)) {
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
	}
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return; /* IRET will cause a vm exit */

	/* Something prevents NMI from being injected. Single step over
	   possible problem (IRET or exception injection or interrupt
	   shadow) */
	vcpu->arch.singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
	update_db_intercept(vcpu);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}
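
/*
 * The next two helpers keep the local APIC TPR and the VMCB V_TPR field
 * coherent: non-intercepted guest CR8 writes are propagated back to the
 * lapic after a vmexit, and the lapic value is copied into V_TPR before
 * the next vmentry.
 */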

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}
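
/*
 * After a vmexit, exit_int_info describes an event (NMI, exception or
 * external interrupt) whose delivery was interrupted by the exit. Such
 * an event never reached the guest, so it is decoded here and requeued
 * for injection on the next entry.
 */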

static void svm_complete_interrupts(struct vcpu_svm *svm)
{
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;

	if (svm->vcpu.arch.hflags & HF_IRET_MASK)
		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);

	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		svm->vcpu.arch.nmi_injected = true;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/* In case of a software exception do not reinject the
		   exception vector, but re-execute the instruction instead */
		if (is_nested(svm))
			break;
		if (kvm_exception_is_soft(vector))
			break;
		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_queue_exception_e(&svm->vcpu, vector, err);
		} else
			kvm_queue_exception(&svm->vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(&svm->vcpu, vector, false);
		break;
	default:
		break;
	}
}

#ifdef CONFIG_X86_64
#define R "r"
#else
#define R "e"
#endif
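
/*
 * The world switch itself. Guest general purpose registers that the
 * hardware does not context-switch are moved in and out by hand around
 * the VMLOAD/VMRUN/VMSAVE sequence; the R macro above selects "r"
 * (64-bit) or "e" (32-bit) register name prefixes.
 */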

static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	pre_svm_run(svm);

	sync_lapic_to_cr8(vcpu);

	save_host_msrs(vcpu);
	fs_selector = kvm_read_fs();
	gs_selector = kvm_read_gs();
	ldt_selector = kvm_read_ldt();
	if (!is_nested(svm))
		svm->vmcb->save.cr2 = vcpu->arch.cr2;
	/* required for live migration with NPT */
	if (npt_enabled)
		svm->vmcb->save.cr3 = vcpu->arch.cr3;

	clgi();

	local_irq_enable();

	asm volatile (
		"push %%"R"bp; \n\t"
		"mov %c[rbx](%[svm]), %%"R"bx \n\t"
		"mov %c[rcx](%[svm]), %%"R"cx \n\t"
		"mov %c[rdx](%[svm]), %%"R"dx \n\t"
		"mov %c[rsi](%[svm]), %%"R"si \n\t"
		"mov %c[rdi](%[svm]), %%"R"di \n\t"
		"mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%[svm]),  %%r8  \n\t"
		"mov %c[r9](%[svm]),  %%r9  \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#endif

		/* Enter guest mode */
		"push %%"R"ax \n\t"
		"mov %c[vmcb](%[svm]), %%"R"ax \n\t"
		__ex(SVM_VMLOAD) "\n\t"
		__ex(SVM_VMRUN) "\n\t"
		__ex(SVM_VMSAVE) "\n\t"
		"pop %%"R"ax \n\t"

		/* Save guest registers, load host registers */
		"mov %%"R"bx, %c[rbx](%[svm]) \n\t"
		"mov %%"R"cx, %c[rcx](%[svm]) \n\t"
		"mov %%"R"dx, %c[rdx](%[svm]) \n\t"
		"mov %%"R"si, %c[rsi](%[svm]) \n\t"
		"mov %%"R"di, %c[rdi](%[svm]) \n\t"
		"mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8,  %c[r8](%[svm]) \n\t"
		"mov %%r9,  %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		"pop %%"R"bp"
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
		, R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#endif
		);

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	kvm_load_fs(fs_selector);
	kvm_load_gs(gs_selector);
	kvm_load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	local_irq_disable();

	stgi();

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;

	if (npt_enabled) {
		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
	}

	svm_complete_interrupts(svm);
}

#undef R
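
/*
 * With NPT the root passed in is the nested page table root and goes
 * into the nested_cr3 control field; under shadow paging it is the
 * shadow root and is written into the save area's CR3. Either way a
 * fresh ASID is requested so stale TLB entries are not reused.
 */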

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (npt_enabled) {
		svm->vmcb->control.nested_cr3 = root;
		force_new_asid(vcpu);
		return;
	}

	svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	return 0;
}

static const struct trace_print_flags svm_exit_reasons_str[] = {
	{ SVM_EXIT_READ_CR0,			"read_cr0" },
	{ SVM_EXIT_READ_CR3,			"read_cr3" },
	{ SVM_EXIT_READ_CR4,			"read_cr4" },
	{ SVM_EXIT_READ_CR8,			"read_cr8" },
	{ SVM_EXIT_WRITE_CR0,			"write_cr0" },
	{ SVM_EXIT_WRITE_CR3,			"write_cr3" },
	{ SVM_EXIT_WRITE_CR4,			"write_cr4" },
	{ SVM_EXIT_WRITE_CR8,			"write_cr8" },
	{ SVM_EXIT_READ_DR0,			"read_dr0" },
	{ SVM_EXIT_READ_DR1,			"read_dr1" },
	{ SVM_EXIT_READ_DR2,			"read_dr2" },
	{ SVM_EXIT_READ_DR3,			"read_dr3" },
	{ SVM_EXIT_WRITE_DR0,			"write_dr0" },
	{ SVM_EXIT_WRITE_DR1,			"write_dr1" },
	{ SVM_EXIT_WRITE_DR2,			"write_dr2" },
	{ SVM_EXIT_WRITE_DR3,			"write_dr3" },
	{ SVM_EXIT_WRITE_DR5,			"write_dr5" },
	{ SVM_EXIT_WRITE_DR7,			"write_dr7" },
	{ SVM_EXIT_EXCP_BASE + DB_VECTOR,	"DB excp" },
	{ SVM_EXIT_EXCP_BASE + BP_VECTOR,	"BP excp" },
	{ SVM_EXIT_EXCP_BASE + UD_VECTOR,	"UD excp" },
	{ SVM_EXIT_EXCP_BASE + PF_VECTOR,	"PF excp" },
	{ SVM_EXIT_EXCP_BASE + NM_VECTOR,	"NM excp" },
	{ SVM_EXIT_EXCP_BASE + MC_VECTOR,	"MC excp" },
	{ SVM_EXIT_INTR,			"interrupt" },
	{ SVM_EXIT_NMI,				"nmi" },
	{ SVM_EXIT_SMI,				"smi" },
	{ SVM_EXIT_INIT,			"init" },
	{ SVM_EXIT_VINTR,			"vintr" },
	{ SVM_EXIT_CPUID,			"cpuid" },
	{ SVM_EXIT_INVD,			"invd" },
	{ SVM_EXIT_HLT,				"hlt" },
	{ SVM_EXIT_INVLPG,			"invlpg" },
	{ SVM_EXIT_INVLPGA,			"invlpga" },
	{ SVM_EXIT_IOIO,			"io" },
	{ SVM_EXIT_MSR,				"msr" },
	{ SVM_EXIT_TASK_SWITCH,			"task_switch" },
	{ SVM_EXIT_SHUTDOWN,			"shutdown" },
	{ SVM_EXIT_VMRUN,			"vmrun" },
	{ SVM_EXIT_VMMCALL,			"hypercall" },
	{ SVM_EXIT_VMLOAD,			"vmload" },
	{ SVM_EXIT_VMSAVE,			"vmsave" },
	{ SVM_EXIT_STGI,			"stgi" },
	{ SVM_EXIT_CLGI,			"clgi" },
	{ SVM_EXIT_SKINIT,			"skinit" },
	{ SVM_EXIT_WBINVD,			"wbinvd" },
	{ SVM_EXIT_MONITOR,			"monitor" },
	{ SVM_EXIT_MWAIT,			"mwait" },
	{ SVM_EXIT_NPF,				"npf" },
	{ -1, NULL }
};

static bool svm_gb_page_enable(void)
{
	return true;
}
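
/*
 * This table wires the SVM backend into the generic KVM core; kvm_init()
 * below registers it and the core calls through these pointers for all
 * vendor-specific operations.
 */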

static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask = svm_get_mt_mask,

	.exit_reasons_str = svm_exit_reasons_str,
	.gb_page_enable = svm_gb_page_enable,
};

static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)