/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <asm/desc.h>

#include "kvm_svm.h"
#include "x86_emulate.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DB_VECTOR 1
#define UD_VECTOR 6
#define GP_VECTOR 13

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)
#define CR4_DE_MASK (1UL << 3)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define KVM_EFER_LMA (1 << 10)
#define KVM_EFER_LME (1 << 8)

unsigned long iopm_base;
unsigned long msrpm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	uint64_t asid_generation;
	uint32_t max_asid;
	uint32_t next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static unsigned get_addr_size(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
	u16 cs_attrib;

	if (!(sa->cr0 & CR0_PE_MASK) || (sa->rflags & X86_EFLAGS_VM))
		return 2;

	cs_attrib = sa->cs.attrib;

	return (cs_attrib & SVM_SELECTOR_L_MASK) ? 8 :
		(cs_attrib & SVM_SELECTOR_DB_MASK) ? 4 : 2;
}

static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);
	return irq;
}

static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}

static inline void clgi(void)
{
	asm volatile (SVM_CLGI);
}

static inline void stgi(void)
{
	asm volatile (SVM_STGI);
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	vcpu->svm->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!(efer & KVM_EFER_LMA))
		efer &= ~KVM_EFER_LME;

	vcpu->svm->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->shadow_efer = efer;
}

static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_VALID_ERR |
					     SVM_EVTINJ_TYPE_EXEPT |
					     GP_VECTOR;
	vcpu->svm->vmcb->control.event_inj_err = error_code;
}

static void inject_ud(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_TYPE_EXEPT |
					     UD_VECTOR;
}

static void inject_db(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_TYPE_EXEPT |
					     DB_VECTOR;
}

static int is_page_fault(uint32_t info)
{
	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	if (!vcpu->svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
		return;
	}
	if (vcpu->svm->next_rip - vcpu->svm->vmcb->save.rip > MAX_INST_SIZE) {
		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
		       __FUNCTION__,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->svm->next_rip);
	}

	vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
	vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->interrupt_window_open = 1;
}

static int has_svm(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		printk(KERN_INFO "has_svm: not amd\n");
		return 0;
	}

	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < SVM_CPUID_FUNC) {
		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
		return 0;
	}

	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
		printk(KERN_DEBUG "has_svm: svm not available\n");
		return 0;
	}
	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (svm_data) {
		uint64_t efer;

		wrmsrl(MSR_VM_HSAVE_PA, 0);
		rdmsrl(MSR_EFER, efer);
		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
		__free_page(svm_data->save_area);
		kfree(svm_data);
	}
}

static void svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *svm_data;
	uint64_t efer;
#ifdef CONFIG_X86_64
	struct desc_ptr gdt_descr;
#else
	struct Xgt_desc_struct gdt_descr;
#endif
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;
}

static int set_msr_interception(u32 *msrpm, unsigned msr,
				int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return 1;
		}
	}
	printk(KERN_DEBUG "%s: not found 0x%x\n", __FUNCTION__, msr);
	return 0;
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	struct page *msrpm_pages;
	void *msrpm_va;
	int r;

	kvm_emulator_want_group7_invlpg();

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;
	memset(page_address(iopm_pages), 0xff,
	       PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);

	r = -ENOMEM;
	if (!msrpm_pages)
		goto err_1;

	msrpm_va = page_address(msrpm_pages);
	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err_2;
	}
	return 0;

err_2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
	msrpm_base = 0;
err_1:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	__free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = msrpm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static int svm_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

static void init_vmcb(struct vmcb *vmcb)
{
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;
	u64 tsc;

	control->intercept_cr_read = INTERCEPT_CR0_MASK |
				     INTERCEPT_CR3_MASK |
				     INTERCEPT_CR4_MASK;

	control->intercept_cr_write = INTERCEPT_CR0_MASK |
				      INTERCEPT_CR3_MASK |
				      INTERCEPT_CR4_MASK;

	control->intercept_dr_read = INTERCEPT_DR0_MASK |
				     INTERCEPT_DR1_MASK |
				     INTERCEPT_DR2_MASK |
				     INTERCEPT_DR3_MASK;

	control->intercept_dr_write = INTERCEPT_DR0_MASK |
				      INTERCEPT_DR1_MASK |
				      INTERCEPT_DR2_MASK |
				      INTERCEPT_DR3_MASK |
				      INTERCEPT_DR5_MASK |
				      INTERCEPT_DR7_MASK;

	control->intercept_exceptions = 1 << PF_VECTOR;

	control->intercept = (1ULL << INTERCEPT_INTR) |
			     (1ULL << INTERCEPT_NMI) |
			     (1ULL << INTERCEPT_SMI) |
		/*
		 * selective cr0 intercept bug?
		 *	0:   0f 22 d8	mov    %eax,%cr3
		 *	3:   0f 20 c0	mov    %cr0,%eax
		 *	6:   0d 00 00 00 80	or     $0x80000000,%eax
		 *	b:   0f 22 c0	mov    %eax,%cr0
		 * set cr3 ->interception
		 * get cr0 ->interception
		 * set cr0 -> no interception
		 */
		/*	(1ULL << INTERCEPT_SELECTIVE_CR0) | */
			     (1ULL << INTERCEPT_CPUID) |
			     (1ULL << INTERCEPT_HLT) |
			     (1ULL << INTERCEPT_INVLPGA) |
			     (1ULL << INTERCEPT_IOIO_PROT) |
			     (1ULL << INTERCEPT_MSR_PROT) |
			     (1ULL << INTERCEPT_TASK_SWITCH) |
			     (1ULL << INTERCEPT_SHUTDOWN) |
			     (1ULL << INTERCEPT_VMRUN) |
			     (1ULL << INTERCEPT_VMMCALL) |
			     (1ULL << INTERCEPT_VMLOAD) |
			     (1ULL << INTERCEPT_VMSAVE) |
			     (1ULL << INTERCEPT_STGI) |
			     (1ULL << INTERCEPT_CLGI) |
			     (1ULL << INTERCEPT_SKINIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = msrpm_base;
	rdtscll(tsc);
	control->tsc_offset = -tsc;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
			  SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;

	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;

	/*
	 * cr0 at cpu reset should be 0x60000010; we enable the cpu cache
	 * by default.  The orderly way would be to enable the cache in the
	 * BIOS.
	 */
	save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK;
	save->cr4 = CR4_PAE_MASK;
	/* rdx = ?? */
}

static int svm_create_vcpu(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int r;

	r = -ENOMEM;
	vcpu->svm = kzalloc(sizeof *vcpu->svm, GFP_KERNEL);
	if (!vcpu->svm)
		goto out1;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto out2;

	vcpu->svm->vmcb = page_address(page);
	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
	vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	vcpu->svm->cr0 = 0x00000010;
	vcpu->svm->asid_generation = 0;
	memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
	init_vmcb(vcpu->svm->vmcb);

	fx_init(vcpu);

	return 0;

out2:
	kfree(vcpu->svm);
out1:
	return r;
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->svm)
		return;
	if (vcpu->svm->vmcb)
		__free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT));
	kfree(vcpu->svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu)
{
	get_cpu();
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	put_cpu();
}

static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
{
}

static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
	vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
	vcpu->regs[VCPU_REGS_RSP] = vcpu->svm->vmcb->save.rsp;
	vcpu->rip = vcpu->svm->vmcb->save.rip;
}

static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
	vcpu->svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
	vcpu->svm->vmcb->save.rip = vcpu->rip;
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return vcpu->svm->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	vcpu->svm->vmcb->save.rflags = rflags;
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &vcpu->svm->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;
}

static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct vmcb_seg *s = svm_seg(vcpu, VCPU_SREG_CS);

	*db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	*l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vcpu->svm->vmcb->save.idtr.limit;
	dt->base = vcpu->svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vcpu->svm->vmcb->save.idtr.limit = dt->limit;
	vcpu->svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vcpu->svm->vmcb->save.gdtr.limit;
	dt->base = vcpu->svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vcpu->svm->vmcb->save.gdtr.limit = dt->limit;
	vcpu->svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & KVM_EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
			vcpu->shadow_efer |= KVM_EFER_LMA;
			vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK)) {
			vcpu->shadow_efer &= ~KVM_EFER_LMA;
			vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
		}
	}
#endif
	vcpu->svm->cr0 = cr0;
	vcpu->svm->vmcb->save.cr0 = cr0 | CR0_PG_MASK | CR0_WP_MASK;
	vcpu->cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	vcpu->cr4 = cr4;
	vcpu->svm->vmcb->save.cr4 = cr4 | CR4_PAE_MASK;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		vcpu->svm->vmcb->save.cpl
			= (vcpu->svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

/* FIXME:

	vcpu->svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	vcpu->svm->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);

*/

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < NR_HOST_SAVE_MSRS; i++)
		wrmsrl(host_save_msrs[i], vcpu->svm->host_msrs[i]);
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < NR_HOST_SAVE_MSRS; i++)
		rdmsrl(host_save_msrs[i], vcpu->svm->host_msrs[i]);
}

static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	vcpu->cpu = svm_data->cpu;
	vcpu->svm->asid_generation = svm_data->asid_generation;
	vcpu->svm->vmcb->control.asid = svm_data->next_asid++;
}

static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	invlpga(address, vcpu->svm->vmcb->control.asid); /* is this needed? */
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	return vcpu->svm->db_regs[dr];
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	*exception = 0;

	if (vcpu->svm->vmcb->save.dr7 & DR7_GD_MASK) {
		vcpu->svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		vcpu->svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		vcpu->svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->cr4 & CR4_DE_MASK) {
			*exception = UD_VECTOR;
			return;
		}
		/* fall through: dr4/dr5 alias dr6/dr7 when CR4.DE is clear */
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
	u64 fault_address;
	u32 error_code;
	enum emulation_result er;
	int r;

	if (is_external_interrupt(exit_int_info))
		push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

	spin_lock(&vcpu->kvm->lock);

	fault_address = vcpu->svm->vmcb->control.exit_info_2;
	error_code = vcpu->svm->vmcb->control.exit_info_1;
	r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
	if (r < 0) {
		spin_unlock(&vcpu->kvm->lock);
		return r;
	}
	if (!r) {
		spin_unlock(&vcpu->kvm->lock);
		return 1;
	}
	er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
	spin_unlock(&vcpu->kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++kvm_stat.mmio_exits;
		kvm_run->exit_reason = KVM_EXIT_MMIO;
		return 0;
	case EMULATE_FAIL:
		vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
		break;
	default:
		BUG();
	}

	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
	init_vmcb(vcpu->svm->vmcb);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int io_get_override(struct kvm_vcpu *vcpu,
			   struct vmcb_seg **seg,
			   int *addr_override)
{
	u8 inst[MAX_INST_SIZE];
	unsigned ins_length;
	gva_t rip;
	int i;

	rip = vcpu->svm->vmcb->save.rip;
	ins_length = vcpu->svm->next_rip - rip;
	rip += vcpu->svm->vmcb->save.cs.base;

	if (ins_length > MAX_INST_SIZE)
		printk(KERN_DEBUG
		       "%s: inst length err, cs base 0x%llx rip 0x%llx "
		       "next rip 0x%llx ins_length %u\n",
		       __FUNCTION__,
		       vcpu->svm->vmcb->save.cs.base,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->svm->vmcb->control.exit_info_2,
		       ins_length);

	if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
		/* #PF */
		return 0;

	*addr_override = 0;
	*seg = NULL;
	for (i = 0; i < ins_length; i++)
		switch (inst[i]) {
		case 0xf0:
		case 0xf2:
		case 0xf3:
		case 0x66:
			continue;
		case 0x67:
			*addr_override = 1;
			continue;
		case 0x2e:
			*seg = &vcpu->svm->vmcb->save.cs;
			continue;
		case 0x36:
			*seg = &vcpu->svm->vmcb->save.ss;
			continue;
		case 0x3e:
			*seg = &vcpu->svm->vmcb->save.ds;
			continue;
		case 0x26:
			*seg = &vcpu->svm->vmcb->save.es;
			continue;
		case 0x64:
			*seg = &vcpu->svm->vmcb->save.fs;
			continue;
		case 0x65:
			*seg = &vcpu->svm->vmcb->save.gs;
			continue;
		default:
			return 1;
		}
	printk(KERN_DEBUG "%s: unexpected\n", __FUNCTION__);
	return 0;
}

static unsigned long io_address(struct kvm_vcpu *vcpu, int ins, u64 *address)
{
	unsigned long addr_mask;
	unsigned long *reg;
	struct vmcb_seg *seg;
	int addr_override;
	struct vmcb_save_area *save_area = &vcpu->svm->vmcb->save;
	u16 cs_attrib = save_area->cs.attrib;
	unsigned addr_size = get_addr_size(vcpu);

	if (!io_get_override(vcpu, &seg, &addr_override))
		return 0;

	if (addr_override)
		addr_size = (addr_size == 2) ? 4 : (addr_size >> 1);

	if (ins) {
		reg = &vcpu->regs[VCPU_REGS_RDI];
		seg = &vcpu->svm->vmcb->save.es;
	} else {
		reg = &vcpu->regs[VCPU_REGS_RSI];
		seg = (seg) ? seg : &vcpu->svm->vmcb->save.ds;
	}

	addr_mask = ~0ULL >> (64 - (addr_size * 8));

	if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
	    !(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
		*address = (*reg & addr_mask);
		return addr_mask;
	}

	if (!(seg->attrib & SVM_SELECTOR_P_MASK)) { /* segment not present */
		svm_inject_gp(vcpu, 0);
		return 0;
	}

	*address = (*reg & addr_mask) + seg->base;
	return addr_mask;
}

static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 io_info = vcpu->svm->vmcb->control.exit_info_1; /* address size bug? */
	int _in = io_info & SVM_IOIO_TYPE_MASK;

	++kvm_stat.io_exits;

	vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;

	kvm_run->exit_reason = KVM_EXIT_IO;
	kvm_run->io.port = io_info >> 16;
	kvm_run->io.direction = (_in) ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	kvm_run->io.size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	kvm_run->io.string = (io_info & SVM_IOIO_STR_MASK) != 0;
	kvm_run->io.rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	kvm_run->io.count = 1;

	if (kvm_run->io.string) {
		unsigned addr_mask;

		addr_mask = io_address(vcpu, _in, &kvm_run->io.address);
		if (!addr_mask) {
			printk(KERN_DEBUG "%s: get io address failed\n",
			       __FUNCTION__);
			return 1;
		}

		if (kvm_run->io.rep) {
			kvm_run->io.count
				= vcpu->regs[VCPU_REGS_RCX] & addr_mask;
			kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags
						   & X86_EFLAGS_DF) != 0;
		}
	} else
		kvm_run->io.value = vcpu->svm->vmcb->save.rax;
	vcpu->pio_pending = 1;
	return 0;
}

static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
	skip_emulated_instruction(vcpu);
	if (vcpu->irq_summary)
		return 1;

	kvm_run->exit_reason = KVM_EXIT_HLT;
	++kvm_stat.halt_exits;
	return 0;
}

static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 3;
	skip_emulated_instruction(vcpu);
	return kvm_hypercall(vcpu, kvm_run);
}

static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	inject_ud(vcpu);
	return 1;
}

static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	printk(KERN_DEBUG "%s: task switch is unsupported\n", __FUNCTION__);
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
	kvm_run->exit_reason = KVM_EXIT_CPUID;
	return 0;
}

static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
	return 1;
}

static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		*data = vcpu->svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = vcpu->svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = vcpu->svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = vcpu->svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = vcpu->svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = vcpu->svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = vcpu->svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = vcpu->svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = vcpu->svm->vmcb->save.sysenter_esp;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(vcpu, ecx, &data))
		svm_inject_gp(vcpu, 0);
	else {
		vcpu->svm->vmcb->save.rax = data & 0xffffffff;
		vcpu->regs[VCPU_REGS_RDX] = data >> 32;
		vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
		skip_emulated_instruction(vcpu);
	}
	return 1;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		vcpu->svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		vcpu->svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		vcpu->svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		vcpu->svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		vcpu->svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		vcpu->svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vcpu->svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		vcpu->svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		vcpu->svm->vmcb->save.sysenter_esp = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data = (vcpu->svm->vmcb->save.rax & -1u)
		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
	if (svm_set_msr(vcpu, ecx, data))
		svm_inject_gp(vcpu, 0);
	else
		skip_emulated_instruction(vcpu);
	return 1;
}

static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (vcpu->svm->vmcb->control.exit_info_1)
		return wrmsr_interception(vcpu, kvm_run);
	else
		return rdmsr_interception(vcpu, kvm_run);
}

static int interrupt_window_interception(struct kvm_vcpu *vcpu,
					 struct kvm_run *kvm_run)
{
	/*
	 * If userspace is waiting to inject interrupts, exit as soon as
	 * possible.
	 */
	if (kvm_run->request_interrupt_window &&
	    !vcpu->irq_summary) {
		++kvm_stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]		= emulate_on_interception,
	[SVM_EXIT_READ_CR3]		= emulate_on_interception,
	[SVM_EXIT_READ_CR4]		= emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0]		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]		= emulate_on_interception,
	[SVM_EXIT_READ_DR0]		= emulate_on_interception,
	[SVM_EXIT_READ_DR1]		= emulate_on_interception,
	[SVM_EXIT_READ_DR2]		= emulate_on_interception,
	[SVM_EXIT_READ_DR3]		= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]		= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]		= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]		= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]		= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]		= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]		= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
	[SVM_EXIT_INTR]			= nop_on_interception,
	[SVM_EXIT_NMI]			= nop_on_interception,
	[SVM_EXIT_SMI]			= nop_on_interception,
	[SVM_EXIT_INIT]			= nop_on_interception,
	[SVM_EXIT_VINTR]		= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]	= emulate_on_interception, */
	[SVM_EXIT_CPUID]		= cpuid_interception,
	[SVM_EXIT_HLT]			= halt_interception,
	[SVM_EXIT_INVLPG]		= emulate_on_interception,
	[SVM_EXIT_INVLPGA]		= invalid_op_interception,
	[SVM_EXIT_IOIO]			= io_interception,
	[SVM_EXIT_MSR]			= msr_interception,
	[SVM_EXIT_TASK_SWITCH]		= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]		= shutdown_interception,
	[SVM_EXIT_VMRUN]		= invalid_op_interception,
	[SVM_EXIT_VMMCALL]		= vmmcall_interception,
	[SVM_EXIT_VMLOAD]		= invalid_op_interception,
	[SVM_EXIT_VMSAVE]		= invalid_op_interception,
	[SVM_EXIT_STGI]			= invalid_op_interception,
	[SVM_EXIT_CLGI]			= invalid_op_interception,
	[SVM_EXIT_SKINIT]		= invalid_op_interception,
};

static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 exit_code = vcpu->svm->vmcb->control.exit_code;

	kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT;

	if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || svm_exit_handlers[exit_code] == 0) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		printk(KERN_ERR "%s: 0x%x @ 0x%llx cr0 0x%lx rflags 0x%llx\n",
		       __FUNCTION__,
		       exit_code,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->cr0,
		       vcpu->svm->vmcb->save.rflags);
		return 0;
	}

	return svm_exit_handlers[exit_code](vcpu, kvm_run);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (vcpu->cpu != cpu ||
	    vcpu->svm->asid_generation != svm_data->asid_generation)
		new_asid(vcpu, svm_data);
}

static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control;

	control = &vcpu->svm->vmcb->control;
	control->int_vector = pop_irq(vcpu);
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void kvm_reput_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;

	if (control->int_ctl & V_IRQ_MASK) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(vcpu, control->int_vector);
	}

	vcpu->interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}

static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;

	vcpu->interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));

	if (vcpu->interrupt_window_open && vcpu->irq_summary)
		/*
		 * Interrupts are enabled and not blocked by sti or mov ss;
		 * inject one now.
		 */
		kvm_do_inject_irq(vcpu);

	/*
	 * Interrupts blocked.  Wait for unblock.
	 */
	if (!vcpu->interrupt_window_open &&
	    (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
		control->intercept |= 1ULL << INTERCEPT_VINTR;
	} else
		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
						  vcpu->irq_summary == 0);
	kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = vcpu->cr8;
	kvm_run->apic_base = vcpu->apic_base;
}

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
}

static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}

static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;
	int r;

again:
	if (!vcpu->mmio_read_completed)
		do_interrupt_requests(vcpu, kvm_run);

	clgi();

	pre_svm_run(vcpu);

	save_host_msrs(vcpu);
	fs_selector = read_fs();
	gs_selector = read_gs();
	ldt_selector = read_ldt();
	vcpu->svm->host_cr2 = kvm_read_cr2();
	vcpu->svm->host_dr6 = read_dr6();
	vcpu->svm->host_dr7 = read_dr7();
	vcpu->svm->vmcb->save.cr2 = vcpu->cr2;

	if (vcpu->svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(vcpu->svm->host_db_regs);
		load_db_regs(vcpu->svm->db_regs);
	}

	fx_save(vcpu->host_fx_image);
	fx_restore(vcpu->guest_fx_image);

	asm volatile (
#ifdef CONFIG_X86_64
		"push %%rbx; push %%rcx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
#else
		"push %%ebx; push %%ecx; push %%edx;"
		"push %%esi; push %%edi; push %%ebp;"
#endif

#ifdef CONFIG_X86_64
		"mov %c[rbx](%[vcpu]), %%rbx \n\t"
		"mov %c[rcx](%[vcpu]), %%rcx \n\t"
		"mov %c[rdx](%[vcpu]), %%rdx \n\t"
		"mov %c[rsi](%[vcpu]), %%rsi \n\t"
		"mov %c[rdi](%[vcpu]), %%rdi \n\t"
		"mov %c[rbp](%[vcpu]), %%rbp \n\t"
		"mov %c[r8](%[vcpu]), %%r8 \n\t"
		"mov %c[r9](%[vcpu]), %%r9 \n\t"
		"mov %c[r10](%[vcpu]), %%r10 \n\t"
		"mov %c[r11](%[vcpu]), %%r11 \n\t"
		"mov %c[r12](%[vcpu]), %%r12 \n\t"
		"mov %c[r13](%[vcpu]), %%r13 \n\t"
		"mov %c[r14](%[vcpu]), %%r14 \n\t"
		"mov %c[r15](%[vcpu]), %%r15 \n\t"
#else
		"mov %c[rbx](%[vcpu]), %%ebx \n\t"
		"mov %c[rcx](%[vcpu]), %%ecx \n\t"
		"mov %c[rdx](%[vcpu]), %%edx \n\t"
		"mov %c[rsi](%[vcpu]), %%esi \n\t"
		"mov %c[rdi](%[vcpu]), %%edi \n\t"
		"mov %c[rbp](%[vcpu]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
		/* Enter guest mode */
		"push %%rax \n\t"
		"mov %c[svm](%[vcpu]), %%rax \n\t"
		"mov %c[vmcb](%%rax), %%rax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%rax \n\t"
#else
		/* Enter guest mode */
		"push %%eax \n\t"
		"mov %c[svm](%[vcpu]), %%eax \n\t"
		"mov %c[vmcb](%%eax), %%eax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%eax \n\t"
#endif

		/* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
		"mov %%rbx, %c[rbx](%[vcpu]) \n\t"
		"mov %%rcx, %c[rcx](%[vcpu]) \n\t"
		"mov %%rdx, %c[rdx](%[vcpu]) \n\t"
		"mov %%rsi, %c[rsi](%[vcpu]) \n\t"
		"mov %%rdi, %c[rdi](%[vcpu]) \n\t"
		"mov %%rbp, %c[rbp](%[vcpu]) \n\t"
		"mov %%r8, %c[r8](%[vcpu]) \n\t"
		"mov %%r9, %c[r9](%[vcpu]) \n\t"
		"mov %%r10, %c[r10](%[vcpu]) \n\t"
		"mov %%r11, %c[r11](%[vcpu]) \n\t"
		"mov %%r12, %c[r12](%[vcpu]) \n\t"
		"mov %%r13, %c[r13](%[vcpu]) \n\t"
		"mov %%r14, %c[r14](%[vcpu]) \n\t"
		"mov %%r15, %c[r15](%[vcpu]) \n\t"

		"pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
		"pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
		"pop %%rbp; pop %%rdi; pop %%rsi;"
		"pop %%rdx; pop %%rcx; pop %%rbx; \n\t"
#else
		"mov %%ebx, %c[rbx](%[vcpu]) \n\t"
		"mov %%ecx, %c[rcx](%[vcpu]) \n\t"
		"mov %%edx, %c[rdx](%[vcpu]) \n\t"
		"mov %%esi, %c[rsi](%[vcpu]) \n\t"
		"mov %%edi, %c[rdi](%[vcpu]) \n\t"
		"mov %%ebp, %c[rbp](%[vcpu]) \n\t"

		"pop %%ebp; pop %%edi; pop %%esi;"
		"pop %%edx; pop %%ecx; pop %%ebx; \n\t"
#endif
		:
		: [vcpu]"a"(vcpu),
		  [svm]"i"(offsetof(struct kvm_vcpu, svm)),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  , [r8]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory");

	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);

	if ((vcpu->svm->vmcb->save.dr7 & 0xff))
		load_db_regs(vcpu->svm->host_db_regs);

	vcpu->cr2 = vcpu->svm->vmcb->save.cr2;

	write_dr6(vcpu->svm->host_dr6);
	write_dr7(vcpu->svm->host_dr7);
	kvm_write_cr2(vcpu->svm->host_cr2);

	load_fs(fs_selector);
	load_gs(gs_selector);
	load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING))
		profile_hit(KVM_PROFILING,
			    (void *)(unsigned long)vcpu->svm->vmcb->save.rip);

	stgi();

	kvm_reput_irq(vcpu);

	vcpu->svm->next_rip = 0;

	if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
		kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code;
		post_kvm_run_save(vcpu, kvm_run);
		return 0;
	}

	r = handle_exit(vcpu, kvm_run);
	if (r > 0) {
		if (signal_pending(current)) {
			++kvm_stat.signal_exits;
			post_kvm_run_save(vcpu, kvm_run);
			return -EINTR;
		}

		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			++kvm_stat.request_irq_exits;
			post_kvm_run_save(vcpu, kvm_run);
			return -EINTR;
		}
		kvm_resched(vcpu);
		goto again;
	}
	post_kvm_run_save(vcpu, kvm_run);
	return r;
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	vcpu->svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);
}

static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  uint32_t err_code)
{
	uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info;

	++kvm_stat.pf_guest;

	if (is_page_fault(exit_int_info)) {
		/* A #PF raised while delivering a #PF escalates to #DF. */
		vcpu->svm->vmcb->control.event_inj_err = 0;
		vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						     SVM_EVTINJ_VALID_ERR |
						     SVM_EVTINJ_TYPE_EXEPT |
						     DF_VECTOR;
		return;
	}
	vcpu->cr2 = addr;
	vcpu->svm->vmcb->save.cr2 = addr;
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_VALID_ERR |
					     SVM_EVTINJ_TYPE_EXEPT |
					     PF_VECTOR;
	vcpu->svm->vmcb->control.event_inj_err = err_code;
}

static int is_disabled(void)
{
	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction (0f 01 d9) followed by a ret:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
	hypercall[3] = 0xc3; /* ret */
}

static struct kvm_arch_ops svm_arch_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,

	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_decache = svm_vcpu_decache,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
	.decache_cr0_cr4_guest_bits = svm_decache_cr0_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr0_no_modeswitch = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_regs = svm_cache_regs,
	.decache_regs = svm_decache_regs,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.invlpg = svm_invlpg,
	.tlb_flush = svm_flush_tlb,
	.inject_page_fault = svm_inject_page_fault,

	.inject_gp = svm_inject_gp,

	.run = svm_vcpu_run,
	.skip_emulated_instruction = skip_emulated_instruction,
	.vcpu_setup = svm_vcpu_setup,
	.patch_hypercall = svm_patch_hypercall,
};

static int __init svm_init(void)
{
	return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit_arch();
}

module_init(svm_init)
module_exit(svm_exit)