/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "x86_emulate.h"
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)
#define CR4_DE_MASK (1UL << 3)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define KVM_EFER_LMA (1 << 10)
#define KVM_EFER_LME (1 << 8)
unsigned long iopm_base;
unsigned long msrpm_base;
struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));
struct svm_cpu_data {
	int cpu;

	uint64_t asid_generation;
	uint32_t max_asid;
	uint32_t next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
struct svm_init_data {
	int cpu;
	int r;
};
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS (sizeof(msrpm_ranges) / sizeof(*msrpm_ranges))
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15
static unsigned get_addr_size(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
	u16 cs_attrib;

	if (!(sa->cr0 & CR0_PE_MASK) || (sa->rflags & X86_EFLAGS_VM))
		return 2;

	cs_attrib = sa->cs.attrib;

	return (cs_attrib & SVM_SELECTOR_L_MASK) ? 8 :
		(cs_attrib & SVM_SELECTOR_DB_MASK) ? 4 : 2;
}
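/*
 * Pending guest interrupts live in a two-level bitmap in the generic
 * kvm_vcpu: irq_pending[] has one bit per vector, and irq_summary has
 * one bit per irq_pending word, so the lowest pending vector can be
 * found with two __ffs() calls instead of a full scan.  pop_irq()
 * below takes one vector out of that structure; push_irq() puts one
 * back.
 */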
static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);

	return irq;
}
static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}
static inline void clgi(void)
{
	asm volatile (SVM_CLGI);
}

static inline void stgi(void)
{
	asm volatile (SVM_STGI);
}
static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}
static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}
static inline int svm_is_long_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->svm->vmcb->save.efer & KVM_EFER_LMA;
}
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	vcpu->svm->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}
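/*
 * force_new_asid() works by making the vcpu's ASID generation stale:
 * pre_svm_run() compares it against the per-cpu generation and calls
 * new_asid() on a mismatch, so the guest gets a fresh ASID (and with
 * it an effectively flushed TLB) on its next VMRUN.
 */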
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!(efer & KVM_EFER_LMA))
		efer &= ~KVM_EFER_LME;

	vcpu->svm->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->shadow_efer = efer;
}
static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_VALID_ERR |
					     SVM_EVTINJ_TYPE_EXEPT |
					     GP_VECTOR;
	vcpu->svm->vmcb->control.event_inj_err = error_code;
}
static void inject_ud(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_TYPE_EXEPT |
					     UD_VECTOR;
}
static void inject_db(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_TYPE_EXEPT |
					     DB_VECTOR;
}
static int is_page_fault(uint32_t info)
{
	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
}
static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	if (!vcpu->svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
		return;
	}
	if (vcpu->svm->next_rip - vcpu->svm->vmcb->save.rip > MAX_INST_SIZE) {
		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
		       __FUNCTION__,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->svm->next_rip);
	}

	vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
	vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
}
static int has_svm(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (current_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		printk(KERN_INFO "has_svm: not amd\n");
		return 0;
	}

	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < SVM_CPUID_FUNC) {
		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
		return 0;
	}

	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
		printk(KERN_DEBUG "has_svm: svm not available\n");
		return 0;
	}

	return 1;
}
static void svm_hardware_disable(void *garbage)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (svm_data) {
		uint64_t efer;

		wrmsrl(MSR_VM_HSAVE_PA, 0);
		rdmsrl(MSR_EFER, efer);
		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
		__free_page(svm_data->save_area);
		kfree(svm_data);
	}
}
static void svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *svm_data;
	uint64_t efer;
#ifdef CONFIG_X86_64
	struct desc_ptr gdt_descr;
#else
	struct Xgt_desc_struct gdt_descr;
#endif
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}
static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	if (!svm_data->save_area) {
		kfree(svm_data);
		return -ENOMEM;
	}

	per_cpu(svm_data, cpu) = svm_data;

	return 0;
}
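/*
 * The MSR permission map holds two bits per MSR (bit 0: intercept
 * reads, bit 1: intercept writes); a set bit means intercept, so
 * set_msr_interception(..., 1, 1) clears both bits and makes the MSR
 * pass through.  Each 2K stretch of the map covers one entry of
 * msrpm_ranges[], i.e. MSRS_IN_RANGE (8192) MSRs.  For example, for
 * MSR_LSTAR (0xc0000082): range i = 1, msr_offset = (1 * 8192 + 0x82)
 * * 2 = 16644 bits, so its two bits live in the u32 at
 * msrpm + 16644 / 32, at bit position 16644 % 32 = 4.
 */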
static int set_msr_interception(u32 *msrpm, unsigned msr,
				int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return 1;
		}
	}
	printk(KERN_DEBUG "%s: not found 0x%x\n", __FUNCTION__, msr);
	return 0;
}
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	struct page *msrpm_pages;
	void *msrpm_va;
	int r;

	kvm_emulator_want_group7_invlpg();

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
	if (!iopm_pages)
		return -ENOMEM;
	memset(page_address(iopm_pages), 0xff,
	       PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	r = -ENOMEM;
	if (!msrpm_pages)
		goto err_1;

	msrpm_va = page_address(msrpm_pages);
	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err_2;
	}
	return 0;

err_2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
	msrpm_base = 0;
err_1:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}
static __exit void svm_hardware_unsetup(void)
{
	__free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = msrpm_base = 0;
}
static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}
static int svm_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}
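/*
 * init_vmcb() below programs the two halves of the VMCB: the control
 * area (which exits to intercept, the I/O and MSR permission map
 * addresses, the TSC offset, virtual interrupt masking) and the save
 * area (the guest register state, set up here to match the x86 reset
 * state: CS 0xf000 with base 0xffff0000, RIP 0xfff0, and so on).
 */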
static void init_vmcb(struct vmcb *vmcb)
{
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;
	u64 tsc;

	control->intercept_cr_read = INTERCEPT_CR0_MASK |
				     INTERCEPT_CR3_MASK |
				     INTERCEPT_CR4_MASK;

	control->intercept_cr_write = INTERCEPT_CR0_MASK |
				      INTERCEPT_CR3_MASK |
				      INTERCEPT_CR4_MASK;

	control->intercept_dr_read = INTERCEPT_DR0_MASK |
				     INTERCEPT_DR1_MASK |
				     INTERCEPT_DR2_MASK |
				     INTERCEPT_DR3_MASK;

	control->intercept_dr_write = INTERCEPT_DR0_MASK |
				      INTERCEPT_DR1_MASK |
				      INTERCEPT_DR2_MASK |
				      INTERCEPT_DR3_MASK |
				      INTERCEPT_DR5_MASK |
				      INTERCEPT_DR7_MASK;

	control->intercept_exceptions = 1 << PF_VECTOR;

	control->intercept = (1ULL << INTERCEPT_INTR) |
			     (1ULL << INTERCEPT_NMI) |
		/*
		 * selective cr0 intercept bug?
		 *	0:   0f 22 d8		mov    %eax,%cr3
		 *	3:   0f 20 c0		mov    %cr0,%eax
		 *	6:   0d 00 00 00 80	or     $0x80000000,%eax
		 *	b:   0f 22 c0		mov    %eax,%cr0
		 * set cr3 -> interception
		 * get cr0 -> interception
		 * set cr0 -> no interception
		 */
		/*	     (1ULL << INTERCEPT_SELECTIVE_CR0) | */
			     (1ULL << INTERCEPT_CPUID) |
			     (1ULL << INTERCEPT_HLT) |
			     (1ULL << INTERCEPT_INVLPG) |
			     (1ULL << INTERCEPT_INVLPGA) |
			     (1ULL << INTERCEPT_IOIO_PROT) |
			     (1ULL << INTERCEPT_MSR_PROT) |
			     (1ULL << INTERCEPT_TASK_SWITCH) |
			     (1ULL << INTERCEPT_VMRUN) |
			     (1ULL << INTERCEPT_VMMCALL) |
			     (1ULL << INTERCEPT_VMLOAD) |
			     (1ULL << INTERCEPT_VMSAVE) |
			     (1ULL << INTERCEPT_STGI) |
			     (1ULL << INTERCEPT_CLGI) |
			     (1ULL << INTERCEPT_SKINIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = msrpm_base;
	rdtscll(tsc);
	control->tsc_offset = -tsc;
	control->int_ctl = V_INTR_MASKING_MASK;

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
			  SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	save->cs.base = 0xffff0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;

	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;

	/*
	 * cr0 on cpu reset should read 0x60000010; we enable the cpu
	 * cache by default.  The proper place to enable caching is the
	 * BIOS.
	 */
	save->cr0 = 0x00000010 | CR0_PG_MASK;
	save->cr4 = CR4_PAE_MASK;
}
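/*
 * Note that init_vmcb() sets CR0.PG and CR4.PAE unconditionally: this
 * version of KVM always runs the guest behind shadow page tables, so
 * hardware paging stays enabled even while the guest believes paging
 * is off (svm_set_cr0() likewise always ORs in CR0_PG_MASK).
 */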
static int svm_create_vcpu(struct kvm_vcpu *vcpu)
{
	struct page *page;

	vcpu->svm = kzalloc(sizeof *vcpu->svm, GFP_KERNEL);
	if (!vcpu->svm)
		return -ENOMEM;
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		kfree(vcpu->svm);
		return -ENOMEM;
	}

	vcpu->svm->vmcb = page_address(page);
	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
	vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	vcpu->svm->cr0 = 0x00000010;
	vcpu->svm->asid_generation = 0;
	memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
	init_vmcb(vcpu->svm->vmcb);

	return 0;
}
static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->svm)
		return;
	if (vcpu->svm->vmcb)
		__free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT));
	kfree(vcpu->svm);
}
static struct kvm_vcpu *svm_vcpu_load(struct kvm_vcpu *vcpu)
{
	return vcpu;
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
}
static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
	vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
	vcpu->regs[VCPU_REGS_RSP] = vcpu->svm->vmcb->save.rsp;
	vcpu->rip = vcpu->svm->vmcb->save.rip;
}
static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
	vcpu->svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
	vcpu->svm->vmcb->save.rip = vcpu->rip;
}
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return vcpu->svm->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	vcpu->svm->vmcb->save.rflags = rflags;
}
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &vcpu->svm->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}
static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}
static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;
}
static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct vmcb_seg *s = svm_seg(vcpu, VCPU_SREG_CS);

	*db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	*l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
}
static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vcpu->svm->vmcb->save.idtr.limit;
	dt->base = vcpu->svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vcpu->svm->vmcb->save.idtr.limit = dt->limit;
	vcpu->svm->vmcb->save.idtr.base = dt->base;
}
static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vcpu->svm->vmcb->save.gdtr.limit;
	dt->base = vcpu->svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vcpu->svm->vmcb->save.gdtr.limit = dt->limit;
	vcpu->svm->vmcb->save.gdtr.base = dt->base;
}
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (vcpu->shadow_efer & KVM_EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
			vcpu->shadow_efer |= KVM_EFER_LMA;
			vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK)) {
			vcpu->shadow_efer &= ~KVM_EFER_LMA;
			vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
		}
	}

	vcpu->svm->cr0 = cr0;
	vcpu->svm->vmcb->save.cr0 = cr0 | CR0_PG_MASK;
}
static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	vcpu->svm->vmcb->save.cr4 = cr4 | CR4_PAE_MASK;
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		vcpu->svm->vmcb->save.cpl
			= (vcpu->svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}
/* FIXME: CR8/TPR propagation; sregs is not in scope here.

	vcpu->svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	vcpu->svm->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);

*/
static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}
static void load_host_msrs(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < NR_HOST_SAVE_MSRS; i++)
		wrmsrl(host_save_msrs[i], vcpu->svm->host_msrs[i]);
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < NR_HOST_SAVE_MSRS; i++)
		rdmsrl(host_save_msrs[i], vcpu->svm->host_msrs[i]);
}
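/*
 * ASID management: ASID 0 belongs to the host address space, so
 * guests use ASIDs 1..max_asid.  Each physical cpu hands out ASIDs
 * from its own pool; when the pool runs dry, new_asid() bumps the
 * per-cpu generation, requests a flush of all ASIDs on the next
 * VMRUN, and starts handing ASIDs out again from 1.  A vcpu whose
 * generation does not match the cpu it is about to run on gets a
 * fresh ASID in pre_svm_run().
 */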
static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	vcpu->cpu = svm_data->cpu;
	vcpu->svm->asid_generation = svm_data->asid_generation;
	vcpu->svm->vmcb->control.asid = svm_data->next_asid++;
}
static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	invlpga(address, vcpu->svm->vmcb->control.asid); /* is needed? */
}
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	return vcpu->svm->db_regs[dr];
}
static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	*exception = 0;

	if (vcpu->svm->vmcb->save.dr7 & DR7_GD_MASK) {
		vcpu->svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		vcpu->svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		vcpu->svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->cr4 & CR4_DE_MASK) {
			*exception = UD_VECTOR;
			return;
		}
	case 7:
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->svm->vmcb->save.dr7 = value;
		return;
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		*exception = UD_VECTOR;
		return;
	}
}
static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
	u64 fault_address;
	u32 error_code;
	enum emulation_result er;

	if (is_external_interrupt(exit_int_info))
		push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

	spin_lock(&vcpu->kvm->lock);

	fault_address = vcpu->svm->vmcb->control.exit_info_2;
	error_code = vcpu->svm->vmcb->control.exit_info_1;
	if (!vcpu->mmu.page_fault(vcpu, fault_address, error_code)) {
		spin_unlock(&vcpu->kvm->lock);
		return 1;
	}
	er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
	spin_unlock(&vcpu->kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++kvm_stat.mmio_exits;
		kvm_run->exit_reason = KVM_EXIT_MMIO;
		return 0;
	case EMULATE_FAIL:
		vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
		break;
	default:
		BUG();
	}

	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}
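/*
 * String I/O (ins/outs) needs the linear address of the data, which
 * the hardware does not provide.  io_get_override() re-fetches the
 * trapping instruction's bytes and scans its legacy prefixes for a
 * segment override (0x2e/0x36/0x3e/0x26/0x64/0x65) and for the 0x67
 * address-size override; io_adress() then combines the override (or
 * the default ES/DS segment) with rdi/rsi to build the address.
 */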
static int io_get_override(struct kvm_vcpu *vcpu,
			   struct vmcb_seg **seg,
			   int *addr_override)
{
	u8 inst[MAX_INST_SIZE];
	unsigned ins_length;
	gva_t rip;
	int i;

	rip = vcpu->svm->vmcb->save.rip;
	ins_length = vcpu->svm->next_rip - rip;
	rip += vcpu->svm->vmcb->save.cs.base;

	if (ins_length > MAX_INST_SIZE)
		printk(KERN_DEBUG
		       "%s: inst length err, cs base 0x%llx rip 0x%llx "
		       "next rip 0x%llx ins_length %u\n",
		       __FUNCTION__,
		       vcpu->svm->vmcb->save.cs.base,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->svm->vmcb->control.exit_info_2,
		       ins_length);

	if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
		/* #PF */
		return 0;

	*addr_override = 0;
	*seg = NULL;
	for (i = 0; i < ins_length; i++)
		switch (inst[i]) {
		case 0xf0:
		case 0xf2:
		case 0xf3:
		case 0x66:
			continue;
		case 0x67:
			*addr_override = 1;
			continue;
		case 0x2e:
			*seg = &vcpu->svm->vmcb->save.cs;
			continue;
		case 0x36:
			*seg = &vcpu->svm->vmcb->save.ss;
			continue;
		case 0x3e:
			*seg = &vcpu->svm->vmcb->save.ds;
			continue;
		case 0x26:
			*seg = &vcpu->svm->vmcb->save.es;
			continue;
		case 0x64:
			*seg = &vcpu->svm->vmcb->save.fs;
			continue;
		case 0x65:
			*seg = &vcpu->svm->vmcb->save.gs;
			continue;
		default:
			return 1;
		}
	printk(KERN_DEBUG "%s: unexpected\n", __FUNCTION__);
	return 0;
}
static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, u64 *address)
{
	unsigned long addr_mask;
	unsigned long *reg;
	struct vmcb_seg *seg;
	int addr_override;
	struct vmcb_save_area *save_area = &vcpu->svm->vmcb->save;
	u16 cs_attrib = save_area->cs.attrib;
	unsigned addr_size = get_addr_size(vcpu);

	if (!io_get_override(vcpu, &seg, &addr_override))
		return 0;

	if (addr_override)
		addr_size = (addr_size == 2) ? 4 : (addr_size >> 1);

	if (ins) {
		reg = &vcpu->regs[VCPU_REGS_RDI];
		seg = &vcpu->svm->vmcb->save.es;
	} else {
		reg = &vcpu->regs[VCPU_REGS_RSI];
		seg = (seg) ? seg : &vcpu->svm->vmcb->save.ds;
	}

	addr_mask = ~0ULL >> (64 - (addr_size * 8));

	if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
	    !(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
		*address = (*reg & addr_mask);
		return addr_mask;
	}

	if (!(seg->attrib & SVM_SELECTOR_P_MASK)) {
		svm_inject_gp(vcpu, 0);
		return 0;
	}

	*address = (*reg & addr_mask) + seg->base;
	return addr_mask;
}
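/*
 * io_adress() returns the address mask for the effective address size
 * (0xffff, 0xffffffff or ~0ULL), or 0 on failure; io_interception()
 * reuses the same mask to truncate rcx when computing the rep count.
 */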
static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 io_info = vcpu->svm->vmcb->control.exit_info_1; /* address size bug? */
	int _in = io_info & SVM_IOIO_TYPE_MASK;

	vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;

	kvm_run->exit_reason = KVM_EXIT_IO;
	kvm_run->io.port = io_info >> 16;
	kvm_run->io.direction = (_in) ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	kvm_run->io.size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	kvm_run->io.string = (io_info & SVM_IOIO_STR_MASK) != 0;
	kvm_run->io.rep = (io_info & SVM_IOIO_REP_MASK) != 0;

	if (kvm_run->io.string) {
		unsigned addr_mask;

		addr_mask = io_adress(vcpu, _in, &kvm_run->io.address);
		if (!addr_mask) {
			printk(KERN_DEBUG "%s: get io address failed\n",
			       __FUNCTION__);
			return 1;
		}

		if (kvm_run->io.rep) {
			kvm_run->io.count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
			kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags
						   & X86_EFLAGS_DF) != 0;
		}
	} else
		kvm_run->io.value = vcpu->svm->vmcb->save.rax;

	return 0;
}
static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}
static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
	skip_emulated_instruction(vcpu);
	if (vcpu->irq_summary && (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF))
		return 1;

	kvm_run->exit_reason = KVM_EXIT_HLT;
	return 0;
}
static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	inject_ud(vcpu);
	return 1;
}
static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	printk(KERN_DEBUG "%s: task switch is unsupported\n", __FUNCTION__);
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}
static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
	kvm_run->exit_reason = KVM_EXIT_CPUID;
	return 0;
}
static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
	return 1;
}
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	switch (ecx) {
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	/* MTRR registers */
	case 0x200 ... 0x2ff:
		*data = 0;
		break;
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		*data = vcpu->svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_EFER:
		*data = vcpu->shadow_efer;
		break;
	case MSR_IA32_APICBASE:
		*data = vcpu->apic_base;
		break;
	case MSR_K6_STAR:
		*data = vcpu->svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = vcpu->svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = vcpu->svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = vcpu->svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = vcpu->svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = vcpu->svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = vcpu->svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = vcpu->svm->vmcb->save.sysenter_esp;
		break;
	default:
		printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", ecx);
		return 1;
	}
	return 0;
}
static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(vcpu, ecx, &data))
		svm_inject_gp(vcpu, 0);
	else {
		vcpu->svm->vmcb->save.rax = data & 0xffffffff;
		vcpu->regs[VCPU_REGS_RDX] = data >> 32;
		vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
		skip_emulated_instruction(vcpu);
	}
	return 1;
}
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	switch (ecx) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
#endif
	case MSR_IA32_MC0_STATUS:
		printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
		       __FUNCTION__, data);
		break;
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		vcpu->svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		vcpu->apic_base = data;
		break;
	case MSR_K6_STAR:
		vcpu->svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		vcpu->svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		vcpu->svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		vcpu->svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		vcpu->svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vcpu->svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		vcpu->svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		vcpu->svm->vmcb->save.sysenter_esp = data;
		break;
	default:
		printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", ecx);
		return 1;
	}
	return 0;
}
static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data = (vcpu->svm->vmcb->save.rax & -1u)
		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
	if (svm_set_msr(vcpu, ecx, data))
		svm_inject_gp(vcpu, 0);
	else
		skip_emulated_instruction(vcpu);
	return 1;
}
static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (vcpu->svm->vmcb->control.exit_info_1)
		return wrmsr_interception(vcpu, kvm_run);
	else
		return rdmsr_interception(vcpu, kvm_run);
}
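/*
 * Exit dispatch: the SVM exit code indexes directly into the handler
 * table below.  A handler returns 1 to resume the guest and 0 to
 * complete the exit to userspace via kvm_run.
 */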
static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]		= emulate_on_interception,
	[SVM_EXIT_READ_CR3]		= emulate_on_interception,
	[SVM_EXIT_READ_CR4]		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR0]		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]		= emulate_on_interception,
	[SVM_EXIT_READ_DR0]		= emulate_on_interception,
	[SVM_EXIT_READ_DR1]		= emulate_on_interception,
	[SVM_EXIT_READ_DR2]		= emulate_on_interception,
	[SVM_EXIT_READ_DR3]		= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]		= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]		= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]		= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]		= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]		= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]		= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
	[SVM_EXIT_INTR]			= nop_on_interception,
	[SVM_EXIT_NMI]			= nop_on_interception,
	[SVM_EXIT_SMI]			= nop_on_interception,
	[SVM_EXIT_INIT]			= nop_on_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]	= emulate_on_interception, */
	[SVM_EXIT_CPUID]		= cpuid_interception,
	[SVM_EXIT_HLT]			= halt_interception,
	[SVM_EXIT_INVLPG]		= emulate_on_interception,
	[SVM_EXIT_INVLPGA]		= invalid_op_interception,
	[SVM_EXIT_IOIO]			= io_interception,
	[SVM_EXIT_MSR]			= msr_interception,
	[SVM_EXIT_TASK_SWITCH]		= task_switch_interception,
	[SVM_EXIT_VMRUN]		= invalid_op_interception,
	[SVM_EXIT_VMMCALL]		= invalid_op_interception,
	[SVM_EXIT_VMLOAD]		= invalid_op_interception,
	[SVM_EXIT_VMSAVE]		= invalid_op_interception,
	[SVM_EXIT_STGI]			= invalid_op_interception,
	[SVM_EXIT_CLGI]			= invalid_op_interception,
	[SVM_EXIT_SKINIT]		= invalid_op_interception,
};
static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 exit_code = vcpu->svm->vmcb->control.exit_code;

	kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT;

	if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= sizeof(svm_exit_handlers) / sizeof(*svm_exit_handlers)
	    || svm_exit_handlers[exit_code] == 0) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		printk(KERN_ERR "%s: 0x%x @ 0x%llx cr0 0x%lx rflags 0x%llx\n",
		       __FUNCTION__,
		       exit_code,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->cr0,
		       vcpu->svm->vmcb->save.rflags);
		return 0;
	}

	return svm_exit_handlers[exit_code](vcpu, kvm_run);
}
static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}
static void pre_svm_run(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (vcpu->cpu != cpu ||
	    vcpu->svm->asid_generation != svm_data->asid_generation)
		new_asid(vcpu, svm_data);
}
static inline void kvm_try_inject_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control;

	if (!vcpu->irq_summary)
		return;

	control = &vcpu->svm->vmcb->control;

	control->int_vector = pop_irq(vcpu);
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}
static void kvm_reput_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;

	if (control->int_ctl & V_IRQ_MASK) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(vcpu, control->int_vector);
	}
}
static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}
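/*
 * svm_vcpu_run(): the world switch.  Interrupts are fenced off with
 * CLGI/STGI around VMRUN; host state that VMRUN does not preserve
 * (MSRs, segment selectors, cr2, debug registers, FPU state) is saved
 * by hand before the switch and restored afterwards.  The asm block
 * loads the guest's general-purpose registers from vcpu->regs, runs
 * VMLOAD/VMRUN/VMSAVE with rax pointing at the VMCB, then stores the
 * guest registers back.
 */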
static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;

again:
	kvm_try_inject_irq(vcpu);

	clgi();

	pre_svm_run(vcpu);

	save_host_msrs(vcpu);
	fs_selector = read_fs();
	gs_selector = read_gs();
	ldt_selector = read_ldt();
	vcpu->svm->host_cr2 = kvm_read_cr2();
	vcpu->svm->host_dr6 = read_dr6();
	vcpu->svm->host_dr7 = read_dr7();
	vcpu->svm->vmcb->save.cr2 = vcpu->cr2;

	if (vcpu->svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(vcpu->svm->host_db_regs);
		load_db_regs(vcpu->svm->db_regs);
	}
	fx_save(vcpu->host_fx_image);
	fx_restore(vcpu->guest_fx_image);

	asm volatile (
#ifdef CONFIG_X86_64
		"push %%rbx; push %%rcx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
#else
		"push %%ebx; push %%ecx; push %%edx;"
		"push %%esi; push %%edi; push %%ebp;"
#endif

#ifdef CONFIG_X86_64
		"mov %c[rbx](%[vcpu]), %%rbx \n\t"
		"mov %c[rcx](%[vcpu]), %%rcx \n\t"
		"mov %c[rdx](%[vcpu]), %%rdx \n\t"
		"mov %c[rsi](%[vcpu]), %%rsi \n\t"
		"mov %c[rdi](%[vcpu]), %%rdi \n\t"
		"mov %c[rbp](%[vcpu]), %%rbp \n\t"
		"mov %c[r8](%[vcpu]),  %%r8  \n\t"
		"mov %c[r9](%[vcpu]),  %%r9  \n\t"
		"mov %c[r10](%[vcpu]), %%r10 \n\t"
		"mov %c[r11](%[vcpu]), %%r11 \n\t"
		"mov %c[r12](%[vcpu]), %%r12 \n\t"
		"mov %c[r13](%[vcpu]), %%r13 \n\t"
		"mov %c[r14](%[vcpu]), %%r14 \n\t"
		"mov %c[r15](%[vcpu]), %%r15 \n\t"
#else
		"mov %c[rbx](%[vcpu]), %%ebx \n\t"
		"mov %c[rcx](%[vcpu]), %%ecx \n\t"
		"mov %c[rdx](%[vcpu]), %%edx \n\t"
		"mov %c[rsi](%[vcpu]), %%esi \n\t"
		"mov %c[rdi](%[vcpu]), %%edi \n\t"
		"mov %c[rbp](%[vcpu]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
		/* Enter guest mode */
		"push %%rax \n\t"
		"mov %c[svm](%[vcpu]), %%rax \n\t"
		"mov %c[vmcb](%%rax), %%rax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%rax \n\t"
#else
		/* Enter guest mode */
		"push %%eax \n\t"
		"mov %c[svm](%[vcpu]), %%eax \n\t"
		"mov %c[vmcb](%%eax), %%eax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%eax \n\t"
#endif

		/* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
		"mov %%rbx, %c[rbx](%[vcpu]) \n\t"
		"mov %%rcx, %c[rcx](%[vcpu]) \n\t"
		"mov %%rdx, %c[rdx](%[vcpu]) \n\t"
		"mov %%rsi, %c[rsi](%[vcpu]) \n\t"
		"mov %%rdi, %c[rdi](%[vcpu]) \n\t"
		"mov %%rbp, %c[rbp](%[vcpu]) \n\t"
		"mov %%r8,  %c[r8](%[vcpu])  \n\t"
		"mov %%r9,  %c[r9](%[vcpu])  \n\t"
		"mov %%r10, %c[r10](%[vcpu]) \n\t"
		"mov %%r11, %c[r11](%[vcpu]) \n\t"
		"mov %%r12, %c[r12](%[vcpu]) \n\t"
		"mov %%r13, %c[r13](%[vcpu]) \n\t"
		"mov %%r14, %c[r14](%[vcpu]) \n\t"
		"mov %%r15, %c[r15](%[vcpu]) \n\t"

		"pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
		"pop %%r11; pop %%r10; pop %%r9;  pop %%r8;"
		"pop %%rbp; pop %%rdi; pop %%rsi;"
		"pop %%rdx; pop %%rcx; pop %%rbx; \n\t"
#else
		"mov %%ebx, %c[rbx](%[vcpu]) \n\t"
		"mov %%ecx, %c[rcx](%[vcpu]) \n\t"
		"mov %%edx, %c[rdx](%[vcpu]) \n\t"
		"mov %%esi, %c[rsi](%[vcpu]) \n\t"
		"mov %%edi, %c[rdi](%[vcpu]) \n\t"
		"mov %%ebp, %c[rbp](%[vcpu]) \n\t"

		"pop %%ebp; pop %%edi; pop %%esi;"
		"pop %%edx; pop %%ecx; pop %%ebx; \n\t"
#endif
		:
		: [vcpu]"a"(vcpu),
		  [svm]"i"(offsetof(struct kvm_vcpu, svm)),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  , [r8]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory");

	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);

	if ((vcpu->svm->vmcb->save.dr7 & 0xff))
		load_db_regs(vcpu->svm->host_db_regs);

	vcpu->cr2 = vcpu->svm->vmcb->save.cr2;

	write_dr6(vcpu->svm->host_dr6);
	write_dr7(vcpu->svm->host_dr7);
	kvm_write_cr2(vcpu->svm->host_cr2);

	load_fs(fs_selector);
	load_gs(gs_selector);
	load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	stgi();

	kvm_reput_irq(vcpu);

	vcpu->svm->next_rip = 0;

	if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
		kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code;
		return 0;
	}

	if (handle_exit(vcpu, kvm_run)) {
		if (signal_pending(current)) {
			++kvm_stat.signal_exits;
			return -EINTR;
		}
		goto again;
	}
	return 0;
}
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	vcpu->svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);
}
static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  uint32_t err_code)
{
	uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info;

	++kvm_stat.pf_guest;

	if (is_page_fault(exit_int_info)) {
		/* #PF while delivering a #PF: promote to double fault */
		vcpu->svm->vmcb->control.event_inj_err = 0;
		vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						     SVM_EVTINJ_VALID_ERR |
						     SVM_EVTINJ_TYPE_EXEPT |
						     DF_VECTOR;
		return;
	}
	vcpu->svm->vmcb->save.cr2 = addr;
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_VALID_ERR |
					     SVM_EVTINJ_TYPE_EXEPT |
					     PF_VECTOR;
	vcpu->svm->vmcb->control.event_inj_err = err_code;
}
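/*
 * Whether the BIOS has disabled virtualization.  This version performs
 * no check (SVM's VM_CR.SVMDIS bit is not consulted) and simply
 * reports "not disabled".
 */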
static int is_disabled(void)
{
	return 0;
}
static struct kvm_arch_ops svm_arch_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,

	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.is_long_mode = svm_is_long_mode,
	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr0_no_modeswitch = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_regs = svm_cache_regs,
	.decache_regs = svm_decache_regs,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.invlpg = svm_invlpg,
	.tlb_flush = svm_flush_tlb,
	.inject_page_fault = svm_inject_page_fault,

	.inject_gp = svm_inject_gp,

	.run = svm_vcpu_run,
	.skip_emulated_instruction = skip_emulated_instruction,
	.vcpu_setup = svm_vcpu_setup,
};
static int __init svm_init(void)
{
	return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit_arch();
}

module_init(svm_init)
module_exit(svm_exit)