arch/x86/kvm/svm.c (mirror_ubuntu-bionic-kernel.git, blob at "kvm: x86: emulate monitor and mwait instructions as nop")
1 /*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * AMD SVM support
5 *
6 * Copyright (C) 2006 Qumranet, Inc.
7 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
8 *
9 * Authors:
10 * Yaniv Kamay <yaniv@qumranet.com>
11 * Avi Kivity <avi@qumranet.com>
12 *
13 * This work is licensed under the terms of the GNU GPL, version 2. See
14 * the COPYING file in the top-level directory.
15 *
16 */
17 #include <linux/kvm_host.h>
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "kvm_cache_regs.h"
22 #include "x86.h"
23 #include "cpuid.h"
24
25 #include <linux/module.h>
26 #include <linux/mod_devicetable.h>
27 #include <linux/kernel.h>
28 #include <linux/vmalloc.h>
29 #include <linux/highmem.h>
30 #include <linux/sched.h>
31 #include <linux/ftrace_event.h>
32 #include <linux/slab.h>
33
34 #include <asm/perf_event.h>
35 #include <asm/tlbflush.h>
36 #include <asm/desc.h>
37 #include <asm/debugreg.h>
38 #include <asm/kvm_para.h>
39
40 #include <asm/virtext.h>
41 #include "trace.h"
42
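/*
 * __ex() wraps the SVM instructions used below (clgi, stgi, invlpga) with
 * an exception fixup, so a fault - e.g. when SVM has already been turned
 * off during an emergency reboot - is handled instead of crashing the host.
 */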
43 #define __ex(x) __kvm_handle_fault_on_reboot(x)
44
45 MODULE_AUTHOR("Qumranet");
46 MODULE_LICENSE("GPL");
47
48 static const struct x86_cpu_id svm_cpu_id[] = {
49 X86_FEATURE_MATCH(X86_FEATURE_SVM),
50 {}
51 };
52 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
53
54 #define IOPM_ALLOC_ORDER 2
55 #define MSRPM_ALLOC_ORDER 1
56
57 #define SEG_TYPE_LDT 2
58 #define SEG_TYPE_BUSY_TSS16 3
59
60 #define SVM_FEATURE_NPT (1 << 0)
61 #define SVM_FEATURE_LBRV (1 << 1)
62 #define SVM_FEATURE_SVML (1 << 2)
63 #define SVM_FEATURE_NRIP (1 << 3)
64 #define SVM_FEATURE_TSC_RATE (1 << 4)
65 #define SVM_FEATURE_VMCB_CLEAN (1 << 5)
66 #define SVM_FEATURE_FLUSH_ASID (1 << 6)
67 #define SVM_FEATURE_DECODE_ASSIST (1 << 7)
68 #define SVM_FEATURE_PAUSE_FILTER (1 << 10)
69
70 #define NESTED_EXIT_HOST 0 /* Exit handled on host level */
71 #define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
72 #define NESTED_EXIT_CONTINUE 2 /* Further checks needed */
73
74 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
75
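/*
 * MSR_AMD64_TSC_RATIO is an 8.32 fixed-point multiplier applied to the
 * host TSC: bits 39:32 hold the integer part, bits 31:0 the fractional
 * part, and bits 63:40 are reserved.  A value of 1ULL << 32
 * (TSC_RATIO_DEFAULT) means the guest TSC runs at host frequency.
 */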
76 #define TSC_RATIO_RSVD 0xffffff0000000000ULL
77 #define TSC_RATIO_MIN 0x0000000000000001ULL
78 #define TSC_RATIO_MAX 0x000000ffffffffffULL
79
80 static bool erratum_383_found __read_mostly;
81
82 static const u32 host_save_user_msrs[] = {
83 #ifdef CONFIG_X86_64
84 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
85 MSR_FS_BASE,
86 #endif
87 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
88 };
89
90 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
91
92 struct kvm_vcpu;
93
94 struct nested_state {
95 struct vmcb *hsave;
96 u64 hsave_msr;
97 u64 vm_cr_msr;
98 u64 vmcb;
99
100 /* These are the merged vectors */
101 u32 *msrpm;
102
103 /* gpa pointers to the real vectors */
104 u64 vmcb_msrpm;
105 u64 vmcb_iopm;
106
107 /* A VMEXIT is required but not yet emulated */
108 bool exit_required;
109
110 /* cache for intercepts of the guest */
111 u32 intercept_cr;
112 u32 intercept_dr;
113 u32 intercept_exceptions;
114 u64 intercept;
115
116 /* Nested Paging related state */
117 u64 nested_cr3;
118 };
119
120 #define MSRPM_OFFSETS 16
121 static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
122
123 /*
124 * Set osvw_len to a higher value when updated Revision Guides
125 * are published and we know what the new status bits are
126 */
127 static uint64_t osvw_len = 4, osvw_status;
128
129 struct vcpu_svm {
130 struct kvm_vcpu vcpu;
131 struct vmcb *vmcb;
132 unsigned long vmcb_pa;
133 struct svm_cpu_data *svm_data;
134 uint64_t asid_generation;
135 uint64_t sysenter_esp;
136 uint64_t sysenter_eip;
137
138 u64 next_rip;
139
140 u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
141 struct {
142 u16 fs;
143 u16 gs;
144 u16 ldt;
145 u64 gs_base;
146 } host;
147
148 u32 *msrpm;
149
150 ulong nmi_iret_rip;
151
152 struct nested_state nested;
153
154 bool nmi_singlestep;
155
156 unsigned int3_injected;
157 unsigned long int3_rip;
158 u32 apf_reason;
159
160 u64 tsc_ratio;
161 };
162
163 static DEFINE_PER_CPU(u64, current_tsc_ratio);
164 #define TSC_RATIO_DEFAULT 0x0100000000ULL
165
166 #define MSR_INVALID 0xffffffffU
167
168 static const struct svm_direct_access_msrs {
169 u32 index; /* Index of the MSR */
170 bool always; /* True if intercept is always on */
171 } direct_access_msrs[] = {
172 { .index = MSR_STAR, .always = true },
173 { .index = MSR_IA32_SYSENTER_CS, .always = true },
174 #ifdef CONFIG_X86_64
175 { .index = MSR_GS_BASE, .always = true },
176 { .index = MSR_FS_BASE, .always = true },
177 { .index = MSR_KERNEL_GS_BASE, .always = true },
178 { .index = MSR_LSTAR, .always = true },
179 { .index = MSR_CSTAR, .always = true },
180 { .index = MSR_SYSCALL_MASK, .always = true },
181 #endif
182 { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
183 { .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
184 { .index = MSR_IA32_LASTINTFROMIP, .always = false },
185 { .index = MSR_IA32_LASTINTTOIP, .always = false },
186 { .index = MSR_INVALID, .always = false },
187 };
188
189 /* enable NPT for AMD64 and X86 with PAE */
190 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
191 static bool npt_enabled = true;
192 #else
193 static bool npt_enabled;
194 #endif
195
196 /* allow nested paging (virtualized MMU) for all guests */
197 static int npt = true;
198 module_param(npt, int, S_IRUGO);
199
200 /* allow nested virtualization in KVM/SVM */
201 static int nested = true;
202 module_param(nested, int, S_IRUGO);
203
204 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
205 static void svm_complete_interrupts(struct vcpu_svm *svm);
206
207 static int nested_svm_exit_handled(struct vcpu_svm *svm);
208 static int nested_svm_intercept(struct vcpu_svm *svm);
209 static int nested_svm_vmexit(struct vcpu_svm *svm);
210 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
211 bool has_error_code, u32 error_code);
212 static u64 __scale_tsc(u64 ratio, u64 tsc);
213
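/*
 * VMCB clean bits: the CPU may cache parts of the VMCB between VMRUNs.
 * Each bit below covers a group of VMCB fields; a set bit tells the
 * hardware that the group has not been modified by software since the
 * last VMRUN and may be used from the cache.  mark_dirty() clears the
 * bit whenever the corresponding fields are written.
 */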
214 enum {
215 VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
216 pause filter count */
217 VMCB_PERM_MAP, /* IOPM Base and MSRPM Base */
218 VMCB_ASID, /* ASID */
219 VMCB_INTR, /* int_ctl, int_vector */
220 VMCB_NPT, /* npt_en, nCR3, gPAT */
221 VMCB_CR, /* CR0, CR3, CR4, EFER */
222 VMCB_DR, /* DR6, DR7 */
223 VMCB_DT, /* GDT, IDT */
224 VMCB_SEG, /* CS, DS, SS, ES, CPL */
225 VMCB_CR2, /* CR2 only */
226 VMCB_LBR, /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
227 VMCB_DIRTY_MAX,
228 };
229
230 /* TPR and CR2 are always written before VMRUN */
231 #define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
232
233 static inline void mark_all_dirty(struct vmcb *vmcb)
234 {
235 vmcb->control.clean = 0;
236 }
237
238 static inline void mark_all_clean(struct vmcb *vmcb)
239 {
240 vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
241 & ~VMCB_ALWAYS_DIRTY_MASK;
242 }
243
244 static inline void mark_dirty(struct vmcb *vmcb, int bit)
245 {
246 vmcb->control.clean &= ~(1 << bit);
247 }
248
249 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
250 {
251 return container_of(vcpu, struct vcpu_svm, vcpu);
252 }
253
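/*
 * While a nested guest runs, the active VMCB has to intercept everything
 * KVM itself needs (the intercepts saved in nested.hsave on guest entry)
 * plus everything the L1 hypervisor asked for (cached in svm->nested), so
 * the effective intercept masks are the bitwise OR of both sets.
 */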
254 static void recalc_intercepts(struct vcpu_svm *svm)
255 {
256 struct vmcb_control_area *c, *h;
257 struct nested_state *g;
258
259 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
260
261 if (!is_guest_mode(&svm->vcpu))
262 return;
263
264 c = &svm->vmcb->control;
265 h = &svm->nested.hsave->control;
266 g = &svm->nested;
267
268 c->intercept_cr = h->intercept_cr | g->intercept_cr;
269 c->intercept_dr = h->intercept_dr | g->intercept_dr;
270 c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
271 c->intercept = h->intercept | g->intercept;
272 }
273
274 static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
275 {
276 if (is_guest_mode(&svm->vcpu))
277 return svm->nested.hsave;
278 else
279 return svm->vmcb;
280 }
281
282 static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
283 {
284 struct vmcb *vmcb = get_host_vmcb(svm);
285
286 vmcb->control.intercept_cr |= (1U << bit);
287
288 recalc_intercepts(svm);
289 }
290
291 static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
292 {
293 struct vmcb *vmcb = get_host_vmcb(svm);
294
295 vmcb->control.intercept_cr &= ~(1U << bit);
296
297 recalc_intercepts(svm);
298 }
299
300 static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
301 {
302 struct vmcb *vmcb = get_host_vmcb(svm);
303
304 return vmcb->control.intercept_cr & (1U << bit);
305 }
306
307 static inline void set_dr_intercepts(struct vcpu_svm *svm)
308 {
309 struct vmcb *vmcb = get_host_vmcb(svm);
310
311 vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
312 | (1 << INTERCEPT_DR1_READ)
313 | (1 << INTERCEPT_DR2_READ)
314 | (1 << INTERCEPT_DR3_READ)
315 | (1 << INTERCEPT_DR4_READ)
316 | (1 << INTERCEPT_DR5_READ)
317 | (1 << INTERCEPT_DR6_READ)
318 | (1 << INTERCEPT_DR7_READ)
319 | (1 << INTERCEPT_DR0_WRITE)
320 | (1 << INTERCEPT_DR1_WRITE)
321 | (1 << INTERCEPT_DR2_WRITE)
322 | (1 << INTERCEPT_DR3_WRITE)
323 | (1 << INTERCEPT_DR4_WRITE)
324 | (1 << INTERCEPT_DR5_WRITE)
325 | (1 << INTERCEPT_DR6_WRITE)
326 | (1 << INTERCEPT_DR7_WRITE);
327
328 recalc_intercepts(svm);
329 }
330
331 static inline void clr_dr_intercepts(struct vcpu_svm *svm)
332 {
333 struct vmcb *vmcb = get_host_vmcb(svm);
334
335 vmcb->control.intercept_dr = 0;
336
337 recalc_intercepts(svm);
338 }
339
340 static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
341 {
342 struct vmcb *vmcb = get_host_vmcb(svm);
343
344 vmcb->control.intercept_exceptions |= (1U << bit);
345
346 recalc_intercepts(svm);
347 }
348
349 static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
350 {
351 struct vmcb *vmcb = get_host_vmcb(svm);
352
353 vmcb->control.intercept_exceptions &= ~(1U << bit);
354
355 recalc_intercepts(svm);
356 }
357
358 static inline void set_intercept(struct vcpu_svm *svm, int bit)
359 {
360 struct vmcb *vmcb = get_host_vmcb(svm);
361
362 vmcb->control.intercept |= (1ULL << bit);
363
364 recalc_intercepts(svm);
365 }
366
367 static inline void clr_intercept(struct vcpu_svm *svm, int bit)
368 {
369 struct vmcb *vmcb = get_host_vmcb(svm);
370
371 vmcb->control.intercept &= ~(1ULL << bit);
372
373 recalc_intercepts(svm);
374 }
375
376 static inline void enable_gif(struct vcpu_svm *svm)
377 {
378 svm->vcpu.arch.hflags |= HF_GIF_MASK;
379 }
380
381 static inline void disable_gif(struct vcpu_svm *svm)
382 {
383 svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
384 }
385
386 static inline bool gif_set(struct vcpu_svm *svm)
387 {
388 return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
389 }
390
391 static unsigned long iopm_base;
392
393 struct kvm_ldttss_desc {
394 u16 limit0;
395 u16 base0;
396 unsigned base1:8, type:5, dpl:2, p:1;
397 unsigned limit1:4, zero0:3, g:1, base2:8;
398 u32 base3;
399 u32 zero1;
400 } __attribute__((packed));
401
402 struct svm_cpu_data {
403 int cpu;
404
405 u64 asid_generation;
406 u32 max_asid;
407 u32 next_asid;
408 struct kvm_ldttss_desc *tss_desc;
409
410 struct page *save_area;
411 };
412
413 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
414
415 struct svm_init_data {
416 int cpu;
417 int r;
418 };
419
420 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
421
422 #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
423 #define MSRS_RANGE_SIZE 2048
424 #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
425
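/*
 * The MSR permission map consists of three 2 KB blocks covering the MSR
 * ranges that start at 0x00000000, 0xc0000000 and 0xc0010000.  Each MSR
 * uses two bits (read and write intercept), so one block covers
 * MSRS_IN_RANGE (8192) MSRs.  svm_msrpm_offset() translates an MSR number
 * into the u32-sized offset of the word holding its bits.
 */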
426 static u32 svm_msrpm_offset(u32 msr)
427 {
428 u32 offset;
429 int i;
430
431 for (i = 0; i < NUM_MSR_MAPS; i++) {
432 if (msr < msrpm_ranges[i] ||
433 msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
434 continue;
435
436 offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
437 offset += (i * MSRS_RANGE_SIZE); /* add range offset */
438
439 /* Now we have the u8 offset - but need the u32 offset */
440 return offset / 4;
441 }
442
443 /* MSR not in any range */
444 return MSR_INVALID;
445 }
446
447 #define MAX_INST_SIZE 15
448
449 static inline void clgi(void)
450 {
451 asm volatile (__ex(SVM_CLGI));
452 }
453
454 static inline void stgi(void)
455 {
456 asm volatile (__ex(SVM_STGI));
457 }
458
459 static inline void invlpga(unsigned long addr, u32 asid)
460 {
461 asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
462 }
463
464 static int get_npt_level(void)
465 {
466 #ifdef CONFIG_X86_64
467 return PT64_ROOT_LEVEL;
468 #else
469 return PT32E_ROOT_LEVEL;
470 #endif
471 }
472
473 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
474 {
475 vcpu->arch.efer = efer;
476 if (!npt_enabled && !(efer & EFER_LMA))
477 efer &= ~EFER_LME;
478
479 to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
480 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
481 }
482
483 static int is_external_interrupt(u32 info)
484 {
485 info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
486 return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
487 }
488
489 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
490 {
491 struct vcpu_svm *svm = to_svm(vcpu);
492 u32 ret = 0;
493
494 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
495 ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
496 return ret & mask;
497 }
498
499 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
500 {
501 struct vcpu_svm *svm = to_svm(vcpu);
502
503 if (mask == 0)
504 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
505 else
506 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
507
508 }
509
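/*
 * Skip over the instruction that caused the current exit.  CPUs with the
 * NRIPS feature report the next RIP in the VMCB (control.next_rip);
 * without it we have to fall back to the emulator just to determine the
 * instruction length.
 */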
510 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
511 {
512 struct vcpu_svm *svm = to_svm(vcpu);
513
514 if (svm->vmcb->control.next_rip != 0)
515 svm->next_rip = svm->vmcb->control.next_rip;
516
517 if (!svm->next_rip) {
518 if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
519 EMULATE_DONE)
520 printk(KERN_DEBUG "%s: NOP\n", __func__);
521 return;
522 }
523 if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
524 printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
525 __func__, kvm_rip_read(vcpu), svm->next_rip);
526
527 kvm_rip_write(vcpu, svm->next_rip);
528 svm_set_interrupt_shadow(vcpu, 0);
529 }
530
531 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
532 bool has_error_code, u32 error_code,
533 bool reinject)
534 {
535 struct vcpu_svm *svm = to_svm(vcpu);
536
537 /*
538 * If we are within a nested VM we'd better #VMEXIT and let the guest
539 * handle the exception
540 */
541 if (!reinject &&
542 nested_svm_check_exception(svm, nr, has_error_code, error_code))
543 return;
544
545 if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
546 unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
547
548 /*
549 * For guest debugging where we have to reinject #BP if some
550 * INT3 is guest-owned:
551 * Emulate nRIP by moving RIP forward. Will fail if injection
552 * raises a fault that is not intercepted. Still better than
553 * failing in all cases.
554 */
555 skip_emulated_instruction(&svm->vcpu);
556 rip = kvm_rip_read(&svm->vcpu);
557 svm->int3_rip = rip + svm->vmcb->save.cs.base;
558 svm->int3_injected = rip - old_rip;
559 }
560
561 svm->vmcb->control.event_inj = nr
562 | SVM_EVTINJ_VALID
563 | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
564 | SVM_EVTINJ_TYPE_EXEPT;
565 svm->vmcb->control.event_inj_err = error_code;
566 }
567
568 static void svm_init_erratum_383(void)
569 {
570 u32 low, high;
571 int err;
572 u64 val;
573
574 if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
575 return;
576
577 /* Use _safe variants to not break nested virtualization */
578 val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
579 if (err)
580 return;
581
582 val |= (1ULL << 47);
583
584 low = lower_32_bits(val);
585 high = upper_32_bits(val);
586
587 native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
588
589 erratum_383_found = true;
590 }
591
592 static void svm_init_osvw(struct kvm_vcpu *vcpu)
593 {
594 /*
595 * Guests should see errata 400 and 415 as fixed (assuming that
596 * HLT and IO instructions are intercepted).
597 */
598 vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
599 vcpu->arch.osvw.status = osvw_status & ~(6ULL);
600
601 /*
602 * By increasing VCPU's osvw.length to 3 we are telling the guest that
603 * all osvw.status bits inside that length, including bit 0 (which is
604 * reserved for erratum 298), are valid. However, if host processor's
605 * osvw_len is 0 then osvw_status[0] carries no information. We need to
606 * be conservative here and therefore we tell the guest that erratum 298
607 * is present (because we really don't know).
608 */
609 if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
610 vcpu->arch.osvw.status |= 1;
611 }
612
613 static int has_svm(void)
614 {
615 const char *msg;
616
617 if (!cpu_has_svm(&msg)) {
618 printk(KERN_INFO "has_svm: %s\n", msg);
619 return 0;
620 }
621
622 return 1;
623 }
624
625 static void svm_hardware_disable(void *garbage)
626 {
627 /* Make sure we clean up behind us */
628 if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
629 wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
630
631 cpu_svm_disable();
632
633 amd_pmu_disable_virt();
634 }
635
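/*
 * Enable SVM on this CPU: set EFER.SVME and point MSR_VM_HSAVE_PA at a
 * per-CPU page the processor uses to save host state across VMRUN/#VMEXIT.
 */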
636 static int svm_hardware_enable(void *garbage)
637 {
638
639 struct svm_cpu_data *sd;
640 uint64_t efer;
641 struct desc_ptr gdt_descr;
642 struct desc_struct *gdt;
643 int me = raw_smp_processor_id();
644
645 rdmsrl(MSR_EFER, efer);
646 if (efer & EFER_SVME)
647 return -EBUSY;
648
649 if (!has_svm()) {
650 pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
651 return -EINVAL;
652 }
653 sd = per_cpu(svm_data, me);
654 if (!sd) {
655 pr_err("%s: svm_data is NULL on %d\n", __func__, me);
656 return -EINVAL;
657 }
658
659 sd->asid_generation = 1;
660 sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
661 sd->next_asid = sd->max_asid + 1;
662
663 native_store_gdt(&gdt_descr);
664 gdt = (struct desc_struct *)gdt_descr.address;
665 sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
666
667 wrmsrl(MSR_EFER, efer | EFER_SVME);
668
669 wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
670
671 if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
672 wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
673 __get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
674 }
675
676
677 /*
678 * Get OSVW bits.
679 *
680 * Note that it is possible to have a system with mixed processor
681 * revisions and therefore different OSVW bits. If bits are not the same
682 * on different processors then choose the worst case (i.e. if erratum
683 * is present on one processor and not on another then assume that the
684 * erratum is present everywhere).
685 */
686 if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
687 uint64_t len, status = 0;
688 int err;
689
690 len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
691 if (!err)
692 status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
693 &err);
694
695 if (err)
696 osvw_status = osvw_len = 0;
697 else {
698 if (len < osvw_len)
699 osvw_len = len;
700 osvw_status |= status;
701 osvw_status &= (1ULL << osvw_len) - 1;
702 }
703 } else
704 osvw_status = osvw_len = 0;
705
706 svm_init_erratum_383();
707
708 amd_pmu_enable_virt();
709
710 return 0;
711 }
712
713 static void svm_cpu_uninit(int cpu)
714 {
715 struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
716
717 if (!sd)
718 return;
719
720 per_cpu(svm_data, raw_smp_processor_id()) = NULL;
721 __free_page(sd->save_area);
722 kfree(sd);
723 }
724
725 static int svm_cpu_init(int cpu)
726 {
727 struct svm_cpu_data *sd;
728 int r;
729
730 sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
731 if (!sd)
732 return -ENOMEM;
733 sd->cpu = cpu;
734 sd->save_area = alloc_page(GFP_KERNEL);
735 r = -ENOMEM;
736 if (!sd->save_area)
737 goto err_1;
738
739 per_cpu(svm_data, cpu) = sd;
740
741 return 0;
742
743 err_1:
744 kfree(sd);
745 return r;
746
747 }
748
749 static bool valid_msr_intercept(u32 index)
750 {
751 int i;
752
753 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
754 if (direct_access_msrs[i].index == index)
755 return true;
756
757 return false;
758 }
759
760 static void set_msr_interception(u32 *msrpm, unsigned msr,
761 int read, int write)
762 {
763 u8 bit_read, bit_write;
764 unsigned long tmp;
765 u32 offset;
766
767 /*
768 * If this warning triggers, extend the direct_access_msrs list at the
769 * beginning of the file
770 */
771 WARN_ON(!valid_msr_intercept(msr));
772
773 offset = svm_msrpm_offset(msr);
774 bit_read = 2 * (msr & 0x0f);
775 bit_write = 2 * (msr & 0x0f) + 1;
776 tmp = msrpm[offset];
777
778 BUG_ON(offset == MSR_INVALID);
779
780 read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp);
781 write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
782
783 msrpm[offset] = tmp;
784 }
785
786 static void svm_vcpu_init_msrpm(u32 *msrpm)
787 {
788 int i;
789
790 memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
791
792 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
793 if (!direct_access_msrs[i].always)
794 continue;
795
796 set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
797 }
798 }
799
800 static void add_msr_offset(u32 offset)
801 {
802 int i;
803
804 for (i = 0; i < MSRPM_OFFSETS; ++i) {
805
806 /* Offset already in list? */
807 if (msrpm_offsets[i] == offset)
808 return;
809
810 /* Slot used by another offset? */
811 if (msrpm_offsets[i] != MSR_INVALID)
812 continue;
813
814 /* Add offset to list */
815 msrpm_offsets[i] = offset;
816
817 return;
818 }
819
820 /*
821 * If this BUG triggers, the msrpm_offsets table has overflowed. Just
822 * increase MSRPM_OFFSETS in this case.
823 */
824 BUG();
825 }
826
827 static void init_msrpm_offsets(void)
828 {
829 int i;
830
831 memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
832
833 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
834 u32 offset;
835
836 offset = svm_msrpm_offset(direct_access_msrs[i].index);
837 BUG_ON(offset == MSR_INVALID);
838
839 add_msr_offset(offset);
840 }
841 }
842
843 static void svm_enable_lbrv(struct vcpu_svm *svm)
844 {
845 u32 *msrpm = svm->msrpm;
846
847 svm->vmcb->control.lbr_ctl = 1;
848 set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
849 set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
850 set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
851 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
852 }
853
854 static void svm_disable_lbrv(struct vcpu_svm *svm)
855 {
856 u32 *msrpm = svm->msrpm;
857
858 svm->vmcb->control.lbr_ctl = 0;
859 set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
860 set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
861 set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
862 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
863 }
864
865 static __init int svm_hardware_setup(void)
866 {
867 int cpu;
868 struct page *iopm_pages;
869 void *iopm_va;
870 int r;
871
872 iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
873
874 if (!iopm_pages)
875 return -ENOMEM;
876
877 iopm_va = page_address(iopm_pages);
878 memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
879 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
880
881 init_msrpm_offsets();
882
883 if (boot_cpu_has(X86_FEATURE_NX))
884 kvm_enable_efer_bits(EFER_NX);
885
886 if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
887 kvm_enable_efer_bits(EFER_FFXSR);
888
889 if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
890 u64 max;
891
892 kvm_has_tsc_control = true;
893
894 /*
895 * Make sure the user can only configure tsc_khz values that
896 * fit into a signed integer.
897 * A min value does not need to be calculated because it will always
898 * be 1 on all machines, and a value of 0 is used to disable
899 * tsc-scaling for the vcpu.
900 */
901 max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));
902
903 kvm_max_guest_tsc_khz = max;
904 }
905
906 if (nested) {
907 printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
908 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
909 }
910
911 for_each_possible_cpu(cpu) {
912 r = svm_cpu_init(cpu);
913 if (r)
914 goto err;
915 }
916
917 if (!boot_cpu_has(X86_FEATURE_NPT))
918 npt_enabled = false;
919
920 if (npt_enabled && !npt) {
921 printk(KERN_INFO "kvm: Nested Paging disabled\n");
922 npt_enabled = false;
923 }
924
925 if (npt_enabled) {
926 printk(KERN_INFO "kvm: Nested Paging enabled\n");
927 kvm_enable_tdp();
928 } else
929 kvm_disable_tdp();
930
931 return 0;
932
933 err:
934 __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
935 iopm_base = 0;
936 return r;
937 }
938
939 static __exit void svm_hardware_unsetup(void)
940 {
941 int cpu;
942
943 for_each_possible_cpu(cpu)
944 svm_cpu_uninit(cpu);
945
946 __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
947 iopm_base = 0;
948 }
949
950 static void init_seg(struct vmcb_seg *seg)
951 {
952 seg->selector = 0;
953 seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
954 SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
955 seg->limit = 0xffff;
956 seg->base = 0;
957 }
958
959 static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
960 {
961 seg->selector = 0;
962 seg->attrib = SVM_SELECTOR_P_MASK | type;
963 seg->limit = 0xffff;
964 seg->base = 0;
965 }
966
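/*
 * Multiply a TSC value by an 8.32 fixed-point ratio, i.e. compute
 * (ratio * tsc) >> 32.  The fractional multiply is split into 32-bit
 * pieces so that no 128-bit intermediate is needed.
 */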
967 static u64 __scale_tsc(u64 ratio, u64 tsc)
968 {
969 u64 mult, frac, _tsc;
970
971 mult = ratio >> 32;
972 frac = ratio & ((1ULL << 32) - 1);
973
974 _tsc = tsc;
975 _tsc *= mult;
976 _tsc += (tsc >> 32) * frac;
977 _tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;
978
979 return _tsc;
980 }
981
982 static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
983 {
984 struct vcpu_svm *svm = to_svm(vcpu);
985 u64 _tsc = tsc;
986
987 if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
988 _tsc = __scale_tsc(svm->tsc_ratio, tsc);
989
990 return _tsc;
991 }
992
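/*
 * Program the TSC ratio for a guest that wants user_tsc_khz: the 8.32
 * fixed-point ratio is (user_tsc_khz << 32) / tsc_khz.  Without the TSC
 * rate MSR, the best we can do is catch-up mode when the requested
 * frequency is higher than the host's.
 */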
993 static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
994 {
995 struct vcpu_svm *svm = to_svm(vcpu);
996 u64 ratio;
997 u64 khz;
998
999 /* Guest TSC same frequency as host TSC? */
1000 if (!scale) {
1001 svm->tsc_ratio = TSC_RATIO_DEFAULT;
1002 return;
1003 }
1004
1005 /* TSC scaling supported? */
1006 if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
1007 if (user_tsc_khz > tsc_khz) {
1008 vcpu->arch.tsc_catchup = 1;
1009 vcpu->arch.tsc_always_catchup = 1;
1010 } else
1011 WARN(1, "user requested TSC rate below hardware speed\n");
1012 return;
1013 }
1014
1015 khz = user_tsc_khz;
1016
1017 /* TSC scaling required - calculate ratio */
1018 ratio = khz << 32;
1019 do_div(ratio, tsc_khz);
1020
1021 if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
1022 WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
1023 user_tsc_khz);
1024 return;
1025 }
1026 svm->tsc_ratio = ratio;
1027 }
1028
1029 static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
1030 {
1031 struct vcpu_svm *svm = to_svm(vcpu);
1032
1033 return svm->vmcb->control.tsc_offset;
1034 }
1035
1036 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1037 {
1038 struct vcpu_svm *svm = to_svm(vcpu);
1039 u64 g_tsc_offset = 0;
1040
1041 if (is_guest_mode(vcpu)) {
1042 g_tsc_offset = svm->vmcb->control.tsc_offset -
1043 svm->nested.hsave->control.tsc_offset;
1044 svm->nested.hsave->control.tsc_offset = offset;
1045 } else
1046 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
1047 svm->vmcb->control.tsc_offset,
1048 offset);
1049
1050 svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
1051
1052 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1053 }
1054
1055 static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
1056 {
1057 struct vcpu_svm *svm = to_svm(vcpu);
1058
1059 WARN_ON(adjustment < 0);
1060 if (host)
1061 adjustment = svm_scale_tsc(vcpu, adjustment);
1062
1063 svm->vmcb->control.tsc_offset += adjustment;
1064 if (is_guest_mode(vcpu))
1065 svm->nested.hsave->control.tsc_offset += adjustment;
1066 else
1067 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
1068 svm->vmcb->control.tsc_offset - adjustment,
1069 svm->vmcb->control.tsc_offset);
1070
1071 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1072 }
1073
1074 static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1075 {
1076 u64 tsc;
1077
1078 tsc = svm_scale_tsc(vcpu, native_read_tsc());
1079
1080 return target_tsc - tsc;
1081 }
1082
1083 static void init_vmcb(struct vcpu_svm *svm)
1084 {
1085 struct vmcb_control_area *control = &svm->vmcb->control;
1086 struct vmcb_save_area *save = &svm->vmcb->save;
1087
1088 svm->vcpu.fpu_active = 1;
1089 svm->vcpu.arch.hflags = 0;
1090
1091 set_cr_intercept(svm, INTERCEPT_CR0_READ);
1092 set_cr_intercept(svm, INTERCEPT_CR3_READ);
1093 set_cr_intercept(svm, INTERCEPT_CR4_READ);
1094 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1095 set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1096 set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
1097 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
1098
1099 set_dr_intercepts(svm);
1100
1101 set_exception_intercept(svm, PF_VECTOR);
1102 set_exception_intercept(svm, UD_VECTOR);
1103 set_exception_intercept(svm, MC_VECTOR);
1104
1105 set_intercept(svm, INTERCEPT_INTR);
1106 set_intercept(svm, INTERCEPT_NMI);
1107 set_intercept(svm, INTERCEPT_SMI);
1108 set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1109 set_intercept(svm, INTERCEPT_RDPMC);
1110 set_intercept(svm, INTERCEPT_CPUID);
1111 set_intercept(svm, INTERCEPT_INVD);
1112 set_intercept(svm, INTERCEPT_HLT);
1113 set_intercept(svm, INTERCEPT_INVLPG);
1114 set_intercept(svm, INTERCEPT_INVLPGA);
1115 set_intercept(svm, INTERCEPT_IOIO_PROT);
1116 set_intercept(svm, INTERCEPT_MSR_PROT);
1117 set_intercept(svm, INTERCEPT_TASK_SWITCH);
1118 set_intercept(svm, INTERCEPT_SHUTDOWN);
1119 set_intercept(svm, INTERCEPT_VMRUN);
1120 set_intercept(svm, INTERCEPT_VMMCALL);
1121 set_intercept(svm, INTERCEPT_VMLOAD);
1122 set_intercept(svm, INTERCEPT_VMSAVE);
1123 set_intercept(svm, INTERCEPT_STGI);
1124 set_intercept(svm, INTERCEPT_CLGI);
1125 set_intercept(svm, INTERCEPT_SKINIT);
1126 set_intercept(svm, INTERCEPT_WBINVD);
1127 set_intercept(svm, INTERCEPT_MONITOR);
1128 set_intercept(svm, INTERCEPT_MWAIT);
1129 set_intercept(svm, INTERCEPT_XSETBV);
1130
1131 control->iopm_base_pa = iopm_base;
1132 control->msrpm_base_pa = __pa(svm->msrpm);
1133 control->int_ctl = V_INTR_MASKING_MASK;
1134
1135 init_seg(&save->es);
1136 init_seg(&save->ss);
1137 init_seg(&save->ds);
1138 init_seg(&save->fs);
1139 init_seg(&save->gs);
1140
1141 save->cs.selector = 0xf000;
1142 save->cs.base = 0xffff0000;
1143 /* Executable/Readable Code Segment */
1144 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1145 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1146 save->cs.limit = 0xffff;
1147
1148 save->gdtr.limit = 0xffff;
1149 save->idtr.limit = 0xffff;
1150
1151 init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1152 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1153
1154 svm_set_efer(&svm->vcpu, 0);
1155 save->dr6 = 0xffff0ff0;
1156 kvm_set_rflags(&svm->vcpu, 2);
1157 save->rip = 0x0000fff0;
1158 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
1159
1160 /*
1161 * This is the guest-visible cr0 value.
1162 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
1163 */
1164 svm->vcpu.arch.cr0 = 0;
1165 (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
1166
1167 save->cr4 = X86_CR4_PAE;
1168 /* rdx = ?? */
1169
1170 if (npt_enabled) {
1171 /* Setup VMCB for Nested Paging */
1172 control->nested_ctl = 1;
1173 clr_intercept(svm, INTERCEPT_INVLPG);
1174 clr_exception_intercept(svm, PF_VECTOR);
1175 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1176 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1177 save->g_pat = 0x0007040600070406ULL;
1178 save->cr3 = 0;
1179 save->cr4 = 0;
1180 }
1181 svm->asid_generation = 0;
1182
1183 svm->nested.vmcb = 0;
1184 svm->vcpu.arch.hflags = 0;
1185
1186 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
1187 control->pause_filter_count = 3000;
1188 set_intercept(svm, INTERCEPT_PAUSE);
1189 }
1190
1191 mark_all_dirty(svm->vmcb);
1192
1193 enable_gif(svm);
1194 }
1195
1196 static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
1197 {
1198 struct vcpu_svm *svm = to_svm(vcpu);
1199 u32 dummy;
1200 u32 eax = 1;
1201
1202 init_vmcb(svm);
1203
1204 kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
1205 kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
1206 }
1207
1208 static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
1209 {
1210 struct vcpu_svm *svm;
1211 struct page *page;
1212 struct page *msrpm_pages;
1213 struct page *hsave_page;
1214 struct page *nested_msrpm_pages;
1215 int err;
1216
1217 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1218 if (!svm) {
1219 err = -ENOMEM;
1220 goto out;
1221 }
1222
1223 svm->tsc_ratio = TSC_RATIO_DEFAULT;
1224
1225 err = kvm_vcpu_init(&svm->vcpu, kvm, id);
1226 if (err)
1227 goto free_svm;
1228
1229 err = -ENOMEM;
1230 page = alloc_page(GFP_KERNEL);
1231 if (!page)
1232 goto uninit;
1233
1234 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1235 if (!msrpm_pages)
1236 goto free_page1;
1237
1238 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1239 if (!nested_msrpm_pages)
1240 goto free_page2;
1241
1242 hsave_page = alloc_page(GFP_KERNEL);
1243 if (!hsave_page)
1244 goto free_page3;
1245
1246 svm->nested.hsave = page_address(hsave_page);
1247
1248 svm->msrpm = page_address(msrpm_pages);
1249 svm_vcpu_init_msrpm(svm->msrpm);
1250
1251 svm->nested.msrpm = page_address(nested_msrpm_pages);
1252 svm_vcpu_init_msrpm(svm->nested.msrpm);
1253
1254 svm->vmcb = page_address(page);
1255 clear_page(svm->vmcb);
1256 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
1257 svm->asid_generation = 0;
1258 init_vmcb(svm);
1259
1260 svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
1261 if (kvm_vcpu_is_bsp(&svm->vcpu))
1262 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
1263
1264 svm_init_osvw(&svm->vcpu);
1265
1266 return &svm->vcpu;
1267
1268 free_page3:
1269 __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1270 free_page2:
1271 __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1272 free_page1:
1273 __free_page(page);
1274 uninit:
1275 kvm_vcpu_uninit(&svm->vcpu);
1276 free_svm:
1277 kmem_cache_free(kvm_vcpu_cache, svm);
1278 out:
1279 return ERR_PTR(err);
1280 }
1281
1282 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
1283 {
1284 struct vcpu_svm *svm = to_svm(vcpu);
1285
1286 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
1287 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
1288 __free_page(virt_to_page(svm->nested.hsave));
1289 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
1290 kvm_vcpu_uninit(vcpu);
1291 kmem_cache_free(kvm_vcpu_cache, svm);
1292 }
1293
1294 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1295 {
1296 struct vcpu_svm *svm = to_svm(vcpu);
1297 int i;
1298
1299 if (unlikely(cpu != vcpu->cpu)) {
1300 svm->asid_generation = 0;
1301 mark_all_dirty(svm->vmcb);
1302 }
1303
1304 #ifdef CONFIG_X86_64
1305 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
1306 #endif
1307 savesegment(fs, svm->host.fs);
1308 savesegment(gs, svm->host.gs);
1309 svm->host.ldt = kvm_read_ldt();
1310
1311 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
1312 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
1313
1314 if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
1315 svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
1316 __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
1317 wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
1318 }
1319 }
1320
1321 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1322 {
1323 struct vcpu_svm *svm = to_svm(vcpu);
1324 int i;
1325
1326 ++vcpu->stat.host_state_reload;
1327 kvm_load_ldt(svm->host.ldt);
1328 #ifdef CONFIG_X86_64
1329 loadsegment(fs, svm->host.fs);
1330 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
1331 load_gs_index(svm->host.gs);
1332 #else
1333 #ifdef CONFIG_X86_32_LAZY_GS
1334 loadsegment(gs, svm->host.gs);
1335 #endif
1336 #endif
1337 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
1338 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
1339 }
1340
1341 static void svm_update_cpl(struct kvm_vcpu *vcpu)
1342 {
1343 struct vcpu_svm *svm = to_svm(vcpu);
1344 int cpl;
1345
1346 if (!is_protmode(vcpu))
1347 cpl = 0;
1348 else if (svm->vmcb->save.rflags & X86_EFLAGS_VM)
1349 cpl = 3;
1350 else
1351 cpl = svm->vmcb->save.cs.selector & 0x3;
1352
1353 svm->vmcb->save.cpl = cpl;
1354 }
1355
1356 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1357 {
1358 return to_svm(vcpu)->vmcb->save.rflags;
1359 }
1360
1361 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1362 {
1363 unsigned long old_rflags = to_svm(vcpu)->vmcb->save.rflags;
1364
1365 to_svm(vcpu)->vmcb->save.rflags = rflags;
1366 if ((old_rflags ^ rflags) & X86_EFLAGS_VM)
1367 svm_update_cpl(vcpu);
1368 }
1369
1370 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1371 {
1372 switch (reg) {
1373 case VCPU_EXREG_PDPTR:
1374 BUG_ON(!npt_enabled);
1375 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
1376 break;
1377 default:
1378 BUG();
1379 }
1380 }
1381
1382 static void svm_set_vintr(struct vcpu_svm *svm)
1383 {
1384 set_intercept(svm, INTERCEPT_VINTR);
1385 }
1386
1387 static void svm_clear_vintr(struct vcpu_svm *svm)
1388 {
1389 clr_intercept(svm, INTERCEPT_VINTR);
1390 }
1391
1392 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1393 {
1394 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1395
1396 switch (seg) {
1397 case VCPU_SREG_CS: return &save->cs;
1398 case VCPU_SREG_DS: return &save->ds;
1399 case VCPU_SREG_ES: return &save->es;
1400 case VCPU_SREG_FS: return &save->fs;
1401 case VCPU_SREG_GS: return &save->gs;
1402 case VCPU_SREG_SS: return &save->ss;
1403 case VCPU_SREG_TR: return &save->tr;
1404 case VCPU_SREG_LDTR: return &save->ldtr;
1405 }
1406 BUG();
1407 return NULL;
1408 }
1409
1410 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1411 {
1412 struct vmcb_seg *s = svm_seg(vcpu, seg);
1413
1414 return s->base;
1415 }
1416
1417 static void svm_get_segment(struct kvm_vcpu *vcpu,
1418 struct kvm_segment *var, int seg)
1419 {
1420 struct vmcb_seg *s = svm_seg(vcpu, seg);
1421
1422 var->base = s->base;
1423 var->limit = s->limit;
1424 var->selector = s->selector;
1425 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1426 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1427 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1428 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1429 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1430 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1431 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1432 var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
1433
1434 /*
1435 * AMD's VMCB does not have an explicit unusable field, so emulate it
1436 * for cross vendor migration purposes by "not present"
1437 */
1438 var->unusable = !var->present || (var->type == 0);
1439
1440 switch (seg) {
1441 case VCPU_SREG_CS:
1442 /*
1443 * SVM always stores 0 for the 'G' bit in the CS selector in
1444 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
1445 * Intel's VMENTRY has a check on the 'G' bit.
1446 */
1447 var->g = s->limit > 0xfffff;
1448 break;
1449 case VCPU_SREG_TR:
1450 /*
1451 * Work around a bug where the busy flag in the tr selector
1452 * isn't exposed
1453 */
1454 var->type |= 0x2;
1455 break;
1456 case VCPU_SREG_DS:
1457 case VCPU_SREG_ES:
1458 case VCPU_SREG_FS:
1459 case VCPU_SREG_GS:
1460 /*
1461 * The accessed bit must always be set in the segment
1462 * descriptor cache; although it can be cleared in the
1463 * descriptor itself, the cached bit always remains 1. Since
1464 * Intel has a check on this, set it here to support
1465 * cross-vendor migration.
1466 */
1467 if (!var->unusable)
1468 var->type |= 0x1;
1469 break;
1470 case VCPU_SREG_SS:
1471 /*
1472 * On AMD CPUs sometimes the DB bit in the segment
1473 * descriptor is left as 1, although the whole segment has
1474 * been made unusable. Clear it here to pass an Intel VMX
1475 * entry check when cross vendor migrating.
1476 */
1477 if (var->unusable)
1478 var->db = 0;
1479 break;
1480 }
1481 }
1482
1483 static int svm_get_cpl(struct kvm_vcpu *vcpu)
1484 {
1485 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1486
1487 return save->cpl;
1488 }
1489
1490 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1491 {
1492 struct vcpu_svm *svm = to_svm(vcpu);
1493
1494 dt->size = svm->vmcb->save.idtr.limit;
1495 dt->address = svm->vmcb->save.idtr.base;
1496 }
1497
1498 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1499 {
1500 struct vcpu_svm *svm = to_svm(vcpu);
1501
1502 svm->vmcb->save.idtr.limit = dt->size;
1503 svm->vmcb->save.idtr.base = dt->address;
1504 mark_dirty(svm->vmcb, VMCB_DT);
1505 }
1506
1507 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1508 {
1509 struct vcpu_svm *svm = to_svm(vcpu);
1510
1511 dt->size = svm->vmcb->save.gdtr.limit;
1512 dt->address = svm->vmcb->save.gdtr.base;
1513 }
1514
1515 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1516 {
1517 struct vcpu_svm *svm = to_svm(vcpu);
1518
1519 svm->vmcb->save.gdtr.limit = dt->size;
1520 svm->vmcb->save.gdtr.base = dt->address;
1521 mark_dirty(svm->vmcb, VMCB_DT);
1522 }
1523
1524 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
1525 {
1526 }
1527
1528 static void svm_decache_cr3(struct kvm_vcpu *vcpu)
1529 {
1530 }
1531
1532 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
1533 {
1534 }
1535
1536 static void update_cr0_intercept(struct vcpu_svm *svm)
1537 {
1538 ulong gcr0 = svm->vcpu.arch.cr0;
1539 u64 *hcr0 = &svm->vmcb->save.cr0;
1540
1541 if (!svm->vcpu.fpu_active)
1542 *hcr0 |= SVM_CR0_SELECTIVE_MASK;
1543 else
1544 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
1545 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
1546
1547 mark_dirty(svm->vmcb, VMCB_CR);
1548
1549 if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
1550 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
1551 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1552 } else {
1553 set_cr_intercept(svm, INTERCEPT_CR0_READ);
1554 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1555 }
1556 }
1557
1558 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1559 {
1560 struct vcpu_svm *svm = to_svm(vcpu);
1561
1562 #ifdef CONFIG_X86_64
1563 if (vcpu->arch.efer & EFER_LME) {
1564 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
1565 vcpu->arch.efer |= EFER_LMA;
1566 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1567 }
1568
1569 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
1570 vcpu->arch.efer &= ~EFER_LMA;
1571 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1572 }
1573 }
1574 #endif
1575 vcpu->arch.cr0 = cr0;
1576
1577 if (!npt_enabled)
1578 cr0 |= X86_CR0_PG | X86_CR0_WP;
1579
1580 if (!vcpu->fpu_active)
1581 cr0 |= X86_CR0_TS;
1582 /*
1583 * re-enable caching here because the QEMU bios
1584 * does not do it - this results in some delay at
1585 * reboot
1586 */
1587 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1588 svm->vmcb->save.cr0 = cr0;
1589 mark_dirty(svm->vmcb, VMCB_CR);
1590 update_cr0_intercept(svm);
1591 }
1592
1593 static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1594 {
1595 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
1596 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
1597
1598 if (cr4 & X86_CR4_VMXE)
1599 return 1;
1600
1601 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
1602 svm_flush_tlb(vcpu);
1603
1604 vcpu->arch.cr4 = cr4;
1605 if (!npt_enabled)
1606 cr4 |= X86_CR4_PAE;
1607 cr4 |= host_cr4_mce;
1608 to_svm(vcpu)->vmcb->save.cr4 = cr4;
1609 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1610 return 0;
1611 }
1612
1613 static void svm_set_segment(struct kvm_vcpu *vcpu,
1614 struct kvm_segment *var, int seg)
1615 {
1616 struct vcpu_svm *svm = to_svm(vcpu);
1617 struct vmcb_seg *s = svm_seg(vcpu, seg);
1618
1619 s->base = var->base;
1620 s->limit = var->limit;
1621 s->selector = var->selector;
1622 if (var->unusable)
1623 s->attrib = 0;
1624 else {
1625 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1626 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1627 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1628 s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
1629 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1630 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1631 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1632 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1633 }
1634 if (seg == VCPU_SREG_CS)
1635 svm_update_cpl(vcpu);
1636
1637 mark_dirty(svm->vmcb, VMCB_SEG);
1638 }
1639
1640 static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
1641 {
1642 struct vcpu_svm *svm = to_svm(vcpu);
1643
1644 clr_exception_intercept(svm, DB_VECTOR);
1645 clr_exception_intercept(svm, BP_VECTOR);
1646
1647 if (svm->nmi_singlestep)
1648 set_exception_intercept(svm, DB_VECTOR);
1649
1650 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1651 if (vcpu->guest_debug &
1652 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
1653 set_exception_intercept(svm, DB_VECTOR);
1654 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1655 set_exception_intercept(svm, BP_VECTOR);
1656 } else
1657 vcpu->guest_debug = 0;
1658 }
1659
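/*
 * Hand out a fresh ASID from the per-CPU pool.  ASID 0 is reserved for
 * the host; once the pool is exhausted the generation counter is bumped,
 * a flush of all ASIDs is requested and allocation restarts at 1.
 */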
1660 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1661 {
1662 if (sd->next_asid > sd->max_asid) {
1663 ++sd->asid_generation;
1664 sd->next_asid = 1;
1665 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1666 }
1667
1668 svm->asid_generation = sd->asid_generation;
1669 svm->vmcb->control.asid = sd->next_asid++;
1670
1671 mark_dirty(svm->vmcb, VMCB_ASID);
1672 }
1673
1674 static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
1675 {
1676 return to_svm(vcpu)->vmcb->save.dr6;
1677 }
1678
1679 static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
1680 {
1681 struct vcpu_svm *svm = to_svm(vcpu);
1682
1683 svm->vmcb->save.dr6 = value;
1684 mark_dirty(svm->vmcb, VMCB_DR);
1685 }
1686
1687 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
1688 {
1689 struct vcpu_svm *svm = to_svm(vcpu);
1690
1691 get_debugreg(vcpu->arch.db[0], 0);
1692 get_debugreg(vcpu->arch.db[1], 1);
1693 get_debugreg(vcpu->arch.db[2], 2);
1694 get_debugreg(vcpu->arch.db[3], 3);
1695 vcpu->arch.dr6 = svm_get_dr6(vcpu);
1696 vcpu->arch.dr7 = svm->vmcb->save.dr7;
1697
1698 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
1699 set_dr_intercepts(svm);
1700 }
1701
1702 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
1703 {
1704 struct vcpu_svm *svm = to_svm(vcpu);
1705
1706 svm->vmcb->save.dr7 = value;
1707 mark_dirty(svm->vmcb, VMCB_DR);
1708 }
1709
1710 static int pf_interception(struct vcpu_svm *svm)
1711 {
1712 u64 fault_address = svm->vmcb->control.exit_info_2;
1713 u32 error_code;
1714 int r = 1;
1715
1716 switch (svm->apf_reason) {
1717 default:
1718 error_code = svm->vmcb->control.exit_info_1;
1719
1720 trace_kvm_page_fault(fault_address, error_code);
1721 if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
1722 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
1723 r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
1724 svm->vmcb->control.insn_bytes,
1725 svm->vmcb->control.insn_len);
1726 break;
1727 case KVM_PV_REASON_PAGE_NOT_PRESENT:
1728 svm->apf_reason = 0;
1729 local_irq_disable();
1730 kvm_async_pf_task_wait(fault_address);
1731 local_irq_enable();
1732 break;
1733 case KVM_PV_REASON_PAGE_READY:
1734 svm->apf_reason = 0;
1735 local_irq_disable();
1736 kvm_async_pf_task_wake(fault_address);
1737 local_irq_enable();
1738 break;
1739 }
1740 return r;
1741 }
1742
1743 static int db_interception(struct vcpu_svm *svm)
1744 {
1745 struct kvm_run *kvm_run = svm->vcpu.run;
1746
1747 if (!(svm->vcpu.guest_debug &
1748 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
1749 !svm->nmi_singlestep) {
1750 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1751 return 1;
1752 }
1753
1754 if (svm->nmi_singlestep) {
1755 svm->nmi_singlestep = false;
1756 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
1757 svm->vmcb->save.rflags &=
1758 ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1759 update_db_bp_intercept(&svm->vcpu);
1760 }
1761
1762 if (svm->vcpu.guest_debug &
1763 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
1764 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1765 kvm_run->debug.arch.pc =
1766 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1767 kvm_run->debug.arch.exception = DB_VECTOR;
1768 return 0;
1769 }
1770
1771 return 1;
1772 }
1773
1774 static int bp_interception(struct vcpu_svm *svm)
1775 {
1776 struct kvm_run *kvm_run = svm->vcpu.run;
1777
1778 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1779 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1780 kvm_run->debug.arch.exception = BP_VECTOR;
1781 return 0;
1782 }
1783
1784 static int ud_interception(struct vcpu_svm *svm)
1785 {
1786 int er;
1787
1788 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
1789 if (er != EMULATE_DONE)
1790 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
1791 return 1;
1792 }
1793
1794 static void svm_fpu_activate(struct kvm_vcpu *vcpu)
1795 {
1796 struct vcpu_svm *svm = to_svm(vcpu);
1797
1798 clr_exception_intercept(svm, NM_VECTOR);
1799
1800 svm->vcpu.fpu_active = 1;
1801 update_cr0_intercept(svm);
1802 }
1803
1804 static int nm_interception(struct vcpu_svm *svm)
1805 {
1806 svm_fpu_activate(&svm->vcpu);
1807 return 1;
1808 }
1809
1810 static bool is_erratum_383(void)
1811 {
1812 int err, i;
1813 u64 value;
1814
1815 if (!erratum_383_found)
1816 return false;
1817
1818 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
1819 if (err)
1820 return false;
1821
1822 /* Bit 62 may or may not be set for this mce */
1823 value &= ~(1ULL << 62);
1824
1825 if (value != 0xb600000000010015ULL)
1826 return false;
1827
1828 /* Clear MCi_STATUS registers */
1829 for (i = 0; i < 6; ++i)
1830 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
1831
1832 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
1833 if (!err) {
1834 u32 low, high;
1835
1836 value &= ~(1ULL << 2);
1837 low = lower_32_bits(value);
1838 high = upper_32_bits(value);
1839
1840 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
1841 }
1842
1843 /* Flush tlb to evict multi-match entries */
1844 __flush_tlb_all();
1845
1846 return true;
1847 }
1848
1849 static void svm_handle_mce(struct vcpu_svm *svm)
1850 {
1851 if (is_erratum_383()) {
1852 /*
1853 * Erratum 383 triggered. Guest state is corrupt so kill the
1854 * guest.
1855 */
1856 pr_err("KVM: Guest triggered AMD Erratum 383\n");
1857
1858 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
1859
1860 return;
1861 }
1862
1863 /*
1864 * On an #MC intercept the MCE handler is not called automatically in
1865 * the host. So do it by hand here.
1866 */
1867 asm volatile (
1868 "int $0x12\n");
1869 /* not sure if we ever come back to this point */
1870
1871 return;
1872 }
1873
1874 static int mc_interception(struct vcpu_svm *svm)
1875 {
1876 return 1;
1877 }
1878
1879 static int shutdown_interception(struct vcpu_svm *svm)
1880 {
1881 struct kvm_run *kvm_run = svm->vcpu.run;
1882
1883 /*
1884 * VMCB is undefined after a SHUTDOWN intercept
1885 * so reinitialize it.
1886 */
1887 clear_page(svm->vmcb);
1888 init_vmcb(svm);
1889
1890 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1891 return 0;
1892 }
1893
1894 static int io_interception(struct vcpu_svm *svm)
1895 {
1896 struct kvm_vcpu *vcpu = &svm->vcpu;
1897 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
1898 int size, in, string;
1899 unsigned port;
1900
1901 ++svm->vcpu.stat.io_exits;
1902 string = (io_info & SVM_IOIO_STR_MASK) != 0;
1903 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
1904 if (string || in)
1905 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
1906
1907 port = io_info >> 16;
1908 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
1909 svm->next_rip = svm->vmcb->control.exit_info_2;
1910 skip_emulated_instruction(&svm->vcpu);
1911
1912 return kvm_fast_pio_out(vcpu, size, port);
1913 }
1914
1915 static int nmi_interception(struct vcpu_svm *svm)
1916 {
1917 return 1;
1918 }
1919
1920 static int intr_interception(struct vcpu_svm *svm)
1921 {
1922 ++svm->vcpu.stat.irq_exits;
1923 return 1;
1924 }
1925
1926 static int nop_on_interception(struct vcpu_svm *svm)
1927 {
1928 return 1;
1929 }
1930
1931 static int halt_interception(struct vcpu_svm *svm)
1932 {
1933 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
1934 skip_emulated_instruction(&svm->vcpu);
1935 return kvm_emulate_halt(&svm->vcpu);
1936 }
1937
1938 static int vmmcall_interception(struct vcpu_svm *svm)
1939 {
1940 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1941 skip_emulated_instruction(&svm->vcpu);
1942 kvm_emulate_hypercall(&svm->vcpu);
1943 return 1;
1944 }
1945
1946 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
1947 {
1948 struct vcpu_svm *svm = to_svm(vcpu);
1949
1950 return svm->nested.nested_cr3;
1951 }
1952
1953 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
1954 {
1955 struct vcpu_svm *svm = to_svm(vcpu);
1956 u64 cr3 = svm->nested.nested_cr3;
1957 u64 pdpte;
1958 int ret;
1959
1960 ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
1961 offset_in_page(cr3) + index * 8, 8);
1962 if (ret)
1963 return 0;
1964 return pdpte;
1965 }
1966
1967 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
1968 unsigned long root)
1969 {
1970 struct vcpu_svm *svm = to_svm(vcpu);
1971
1972 svm->vmcb->control.nested_cr3 = root;
1973 mark_dirty(svm->vmcb, VMCB_NPT);
1974 svm_flush_tlb(vcpu);
1975 }
1976
1977 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
1978 struct x86_exception *fault)
1979 {
1980 struct vcpu_svm *svm = to_svm(vcpu);
1981
1982 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
1983 svm->vmcb->control.exit_code_hi = 0;
1984 svm->vmcb->control.exit_info_1 = fault->error_code;
1985 svm->vmcb->control.exit_info_2 = fault->address;
1986
1987 nested_svm_vmexit(svm);
1988 }
1989
1990 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
1991 {
1992 kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
1993
1994 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
1995 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
1996 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
1997 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
1998 vcpu->arch.mmu.shadow_root_level = get_npt_level();
1999 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
2000 }
2001
2002 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
2003 {
2004 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
2005 }
2006
2007 static int nested_svm_check_permissions(struct vcpu_svm *svm)
2008 {
2009 if (!(svm->vcpu.arch.efer & EFER_SVME)
2010 || !is_paging(&svm->vcpu)) {
2011 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2012 return 1;
2013 }
2014
2015 if (svm->vmcb->save.cpl) {
2016 kvm_inject_gp(&svm->vcpu, 0);
2017 return 1;
2018 }
2019
2020 return 0;
2021 }
2022
2023 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
2024 bool has_error_code, u32 error_code)
2025 {
2026 int vmexit;
2027
2028 if (!is_guest_mode(&svm->vcpu))
2029 return 0;
2030
2031 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
2032 svm->vmcb->control.exit_code_hi = 0;
2033 svm->vmcb->control.exit_info_1 = error_code;
2034 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
2035
2036 vmexit = nested_svm_intercept(svm);
2037 if (vmexit == NESTED_EXIT_DONE)
2038 svm->nested.exit_required = true;
2039
2040 return vmexit;
2041 }
2042
2043 /* This function returns true if it is safe to enable the irq window */
2044 static inline bool nested_svm_intr(struct vcpu_svm *svm)
2045 {
2046 if (!is_guest_mode(&svm->vcpu))
2047 return true;
2048
2049 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2050 return true;
2051
2052 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
2053 return false;
2054
2055 /*
2056 * if vmexit was already requested (by intercepted exception
2057 * for instance) do not overwrite it with "external interrupt"
2058 * vmexit.
2059 */
2060 if (svm->nested.exit_required)
2061 return false;
2062
2063 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
2064 svm->vmcb->control.exit_info_1 = 0;
2065 svm->vmcb->control.exit_info_2 = 0;
2066
2067 if (svm->nested.intercept & 1ULL) {
2068 /*
2069 * The #vmexit can't be emulated here directly because this
2070 * code path runs with irqs and preemption disabled. A
2071 * #vmexit emulation might sleep. Only signal request for
2072 * the #vmexit here.
2073 */
2074 svm->nested.exit_required = true;
2075 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
2076 return false;
2077 }
2078
2079 return true;
2080 }
2081
2082 /* This function returns true if it is safe to enable the nmi window */
2083 static inline bool nested_svm_nmi(struct vcpu_svm *svm)
2084 {
2085 if (!is_guest_mode(&svm->vcpu))
2086 return true;
2087
2088 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
2089 return true;
2090
2091 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
2092 svm->nested.exit_required = true;
2093
2094 return false;
2095 }
2096
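/*
 * Map a page of guest memory (the nested VMCB or one of its permission
 * bitmaps) into the host so it can be accessed directly.  The page stays
 * pinned until nested_svm_unmap() kunmaps it and marks it dirty; if the
 * gpa does not map to a valid page, a #GP is queued for the guest.
 */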
2097 static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
2098 {
2099 struct page *page;
2100
2101 might_sleep();
2102
2103 page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
2104 if (is_error_page(page))
2105 goto error;
2106
2107 *_page = page;
2108
2109 return kmap(page);
2110
2111 error:
2112 kvm_inject_gp(&svm->vcpu, 0);
2113
2114 return NULL;
2115 }
2116
2117 static void nested_svm_unmap(struct page *page)
2118 {
2119 kunmap(page);
2120 kvm_release_page_dirty(page);
2121 }
2122
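/*
 * The nested IO permission bitmap holds one intercept bit per I/O port:
 * port / 8 selects the byte, port % 8 the bit within it.  A set bit means
 * the L1 hypervisor wants this IO access forwarded to it as a #VMEXIT.
 */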
2123 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
2124 {
2125 unsigned port;
2126 u8 val, bit;
2127 u64 gpa;
2128
2129 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
2130 return NESTED_EXIT_HOST;
2131
2132 port = svm->vmcb->control.exit_info_1 >> 16;
2133 gpa = svm->nested.vmcb_iopm + (port / 8);
2134 bit = port % 8;
2135 val = 0;
2136
2137 if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
2138 return NESTED_EXIT_DONE;
2139
2140 return (val & (1 << bit)) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
2141 }
2142
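/*
 * The MSR permission bitmap uses two bits per MSR, the even bit for reads
 * and the odd bit for writes.  svm_msrpm_offset() returns the offset in
 * 32-bit words, so one word covers 16 MSRs and (2 * (msr & 0xf)) + write
 * selects the bit for this access.  A set bit means the L1 hypervisor
 * wants a #VMEXIT for the access.
 */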
2143 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
2144 {
2145 u32 offset, msr, value;
2146 int write, mask;
2147
2148 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2149 return NESTED_EXIT_HOST;
2150
2151 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2152 offset = svm_msrpm_offset(msr);
2153 write = svm->vmcb->control.exit_info_1 & 1;
2154 mask = 1 << ((2 * (msr & 0xf)) + write);
2155
2156 if (offset == MSR_INVALID)
2157 return NESTED_EXIT_DONE;
2158
2159 /* Offset is in 32 bit units but we need it in 8 bit (byte) units */
2160 offset *= 4;
2161
2162 if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
2163 return NESTED_EXIT_DONE;
2164
2165 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
2166 }
2167
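/*
 * Exits the host must always see first, regardless of the L1 intercept
 * bits: physical interrupts, NMIs and machine checks, nested page faults
 * while NPT is in use, and, with shadow paging, ordinary #PFs (async
 * page faults are still forwarded).  An intercepted #NM is handled
 * directly here.
 */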
2168 static int nested_svm_exit_special(struct vcpu_svm *svm)
2169 {
2170 u32 exit_code = svm->vmcb->control.exit_code;
2171
2172 switch (exit_code) {
2173 case SVM_EXIT_INTR:
2174 case SVM_EXIT_NMI:
2175 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
2176 return NESTED_EXIT_HOST;
2177 case SVM_EXIT_NPF:
2178 /* For now we are always handling NPFs when using them */
2179 if (npt_enabled)
2180 return NESTED_EXIT_HOST;
2181 break;
2182 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
2183 /* When we're shadowing, trap PFs, but not async PF */
2184 if (!npt_enabled && svm->apf_reason == 0)
2185 return NESTED_EXIT_HOST;
2186 break;
2187 case SVM_EXIT_EXCP_BASE + NM_VECTOR:
2188 nm_interception(svm);
2189 break;
2190 default:
2191 break;
2192 }
2193
2194 return NESTED_EXIT_CONTINUE;
2195 }
2196
2197 /*
2198 * If this function returns NESTED_EXIT_DONE, the #vmexit must be forwarded to the L1 hypervisor (the caller emulates the nested #vmexit)
2199 */
2200 static int nested_svm_intercept(struct vcpu_svm *svm)
2201 {
2202 u32 exit_code = svm->vmcb->control.exit_code;
2203 int vmexit = NESTED_EXIT_HOST;
2204
2205 switch (exit_code) {
2206 case SVM_EXIT_MSR:
2207 vmexit = nested_svm_exit_handled_msr(svm);
2208 break;
2209 case SVM_EXIT_IOIO:
2210 vmexit = nested_svm_intercept_ioio(svm);
2211 break;
2212 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
2213 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
2214 if (svm->nested.intercept_cr & bit)
2215 vmexit = NESTED_EXIT_DONE;
2216 break;
2217 }
2218 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
2219 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
2220 if (svm->nested.intercept_dr & bit)
2221 vmexit = NESTED_EXIT_DONE;
2222 break;
2223 }
2224 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
2225 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
2226 if (svm->nested.intercept_exceptions & excp_bits)
2227 vmexit = NESTED_EXIT_DONE;
2228 /* an async page fault always causes a vmexit */
2229 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
2230 svm->apf_reason != 0)
2231 vmexit = NESTED_EXIT_DONE;
2232 break;
2233 }
2234 case SVM_EXIT_ERR: {
2235 vmexit = NESTED_EXIT_DONE;
2236 break;
2237 }
2238 default: {
2239 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
2240 if (svm->nested.intercept & exit_bits)
2241 vmexit = NESTED_EXIT_DONE;
2242 }
2243 }
2244
2245 return vmexit;
2246 }
2247
2248 static int nested_svm_exit_handled(struct vcpu_svm *svm)
2249 {
2250 int vmexit;
2251
2252 vmexit = nested_svm_intercept(svm);
2253
2254 if (vmexit == NESTED_EXIT_DONE)
2255 nested_svm_vmexit(svm);
2256
2257 return vmexit;
2258 }
2259
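/*
 * Copy the VMCB control area field by field rather than with a memcpy so
 * that only the architecturally defined fields listed here are
 * transferred and anything else in the destination is left untouched.
 */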
2260 static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
2261 {
2262 struct vmcb_control_area *dst = &dst_vmcb->control;
2263 struct vmcb_control_area *from = &from_vmcb->control;
2264
2265 dst->intercept_cr = from->intercept_cr;
2266 dst->intercept_dr = from->intercept_dr;
2267 dst->intercept_exceptions = from->intercept_exceptions;
2268 dst->intercept = from->intercept;
2269 dst->iopm_base_pa = from->iopm_base_pa;
2270 dst->msrpm_base_pa = from->msrpm_base_pa;
2271 dst->tsc_offset = from->tsc_offset;
2272 dst->asid = from->asid;
2273 dst->tlb_ctl = from->tlb_ctl;
2274 dst->int_ctl = from->int_ctl;
2275 dst->int_vector = from->int_vector;
2276 dst->int_state = from->int_state;
2277 dst->exit_code = from->exit_code;
2278 dst->exit_code_hi = from->exit_code_hi;
2279 dst->exit_info_1 = from->exit_info_1;
2280 dst->exit_info_2 = from->exit_info_2;
2281 dst->exit_int_info = from->exit_int_info;
2282 dst->exit_int_info_err = from->exit_int_info_err;
2283 dst->nested_ctl = from->nested_ctl;
2284 dst->event_inj = from->event_inj;
2285 dst->event_inj_err = from->event_inj_err;
2286 dst->nested_cr3 = from->nested_cr3;
2287 dst->lbr_ctl = from->lbr_ctl;
2288 }
2289
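/*
 * Emulate a #VMEXIT into the L1 hypervisor: propagate the exit state of
 * the current VMCB into the nested (L1-provided) VMCB, clear GIF, restore
 * the host state stashed in hsave at VMRUN time and switch the MMU back
 * to the L1 context.
 */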
2290 static int nested_svm_vmexit(struct vcpu_svm *svm)
2291 {
2292 struct vmcb *nested_vmcb;
2293 struct vmcb *hsave = svm->nested.hsave;
2294 struct vmcb *vmcb = svm->vmcb;
2295 struct page *page;
2296
2297 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
2298 vmcb->control.exit_info_1,
2299 vmcb->control.exit_info_2,
2300 vmcb->control.exit_int_info,
2301 vmcb->control.exit_int_info_err,
2302 KVM_ISA_SVM);
2303
2304 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
2305 if (!nested_vmcb)
2306 return 1;
2307
2308 /* Exit Guest-Mode */
2309 leave_guest_mode(&svm->vcpu);
2310 svm->nested.vmcb = 0;
2311
2312 /* Give the current vmcb to the guest */
2313 disable_gif(svm);
2314
2315 nested_vmcb->save.es = vmcb->save.es;
2316 nested_vmcb->save.cs = vmcb->save.cs;
2317 nested_vmcb->save.ss = vmcb->save.ss;
2318 nested_vmcb->save.ds = vmcb->save.ds;
2319 nested_vmcb->save.gdtr = vmcb->save.gdtr;
2320 nested_vmcb->save.idtr = vmcb->save.idtr;
2321 nested_vmcb->save.efer = svm->vcpu.arch.efer;
2322 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
2323 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
2324 nested_vmcb->save.cr2 = vmcb->save.cr2;
2325 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
2326 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
2327 nested_vmcb->save.rip = vmcb->save.rip;
2328 nested_vmcb->save.rsp = vmcb->save.rsp;
2329 nested_vmcb->save.rax = vmcb->save.rax;
2330 nested_vmcb->save.dr7 = vmcb->save.dr7;
2331 nested_vmcb->save.dr6 = vmcb->save.dr6;
2332 nested_vmcb->save.cpl = vmcb->save.cpl;
2333
2334 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
2335 nested_vmcb->control.int_vector = vmcb->control.int_vector;
2336 nested_vmcb->control.int_state = vmcb->control.int_state;
2337 nested_vmcb->control.exit_code = vmcb->control.exit_code;
2338 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
2339 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
2340 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
2341 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
2342 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
2343 nested_vmcb->control.next_rip = vmcb->control.next_rip;
2344
2345 /*
2346 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
2347 * to make sure that we do not lose injected events. So check event_inj
2348 * here and copy it to exit_int_info if it is valid.
2349 * Exit_int_info and event_inj can't both be valid because the case
2350 * below only happens on a VMRUN instruction intercept which has
2351 * no valid exit_int_info set.
2352 */
2353 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
2354 struct vmcb_control_area *nc = &nested_vmcb->control;
2355
2356 nc->exit_int_info = vmcb->control.event_inj;
2357 nc->exit_int_info_err = vmcb->control.event_inj_err;
2358 }
2359
2360 nested_vmcb->control.tlb_ctl = 0;
2361 nested_vmcb->control.event_inj = 0;
2362 nested_vmcb->control.event_inj_err = 0;
2363
2364 /* We always set V_INTR_MASKING and remember the old value in hflags */
2365 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2366 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
2367
2368 /* Restore the original control entries */
2369 copy_vmcb_control_area(vmcb, hsave);
2370
2371 kvm_clear_exception_queue(&svm->vcpu);
2372 kvm_clear_interrupt_queue(&svm->vcpu);
2373
2374 svm->nested.nested_cr3 = 0;
2375
2376 /* Restore selected save entries */
2377 svm->vmcb->save.es = hsave->save.es;
2378 svm->vmcb->save.cs = hsave->save.cs;
2379 svm->vmcb->save.ss = hsave->save.ss;
2380 svm->vmcb->save.ds = hsave->save.ds;
2381 svm->vmcb->save.gdtr = hsave->save.gdtr;
2382 svm->vmcb->save.idtr = hsave->save.idtr;
2383 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
2384 svm_set_efer(&svm->vcpu, hsave->save.efer);
2385 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
2386 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
2387 if (npt_enabled) {
2388 svm->vmcb->save.cr3 = hsave->save.cr3;
2389 svm->vcpu.arch.cr3 = hsave->save.cr3;
2390 } else {
2391 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
2392 }
2393 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
2394 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
2395 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
2396 svm->vmcb->save.dr7 = 0;
2397 svm->vmcb->save.cpl = 0;
2398 svm->vmcb->control.exit_int_info = 0;
2399
2400 mark_all_dirty(svm->vmcb);
2401
2402 nested_svm_unmap(page);
2403
2404 nested_svm_uninit_mmu_context(&svm->vcpu);
2405 kvm_mmu_reset_context(&svm->vcpu);
2406 kvm_mmu_load(&svm->vcpu);
2407
2408 return 0;
2409 }
2410
2411 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
2412 {
2413 /*
2414 * This function merges the msr permission bitmaps of kvm and the
2415 * nested vmcb. It is optimized in that it only merges the parts where
2416 * the kvm msr permission bitmap may contain zero bits
2417 */
2418 int i;
2419
2420 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2421 return true;
2422
2423 for (i = 0; i < MSRPM_OFFSETS; i++) {
2424 u32 value, p;
2425 u64 offset;
2426
2427 if (msrpm_offsets[i] == 0xffffffff)
2428 break;
2429
2430 p = msrpm_offsets[i];
2431 offset = svm->nested.vmcb_msrpm + (p * 4);
2432
2433 if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
2434 return false;
2435
2436 svm->nested.msrpm[p] = svm->msrpm[p] | value;
2437 }
2438
2439 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
2440
2441 return true;
2442 }
2443
2444 static bool nested_vmcb_checks(struct vmcb *vmcb)
2445 {
2446 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
2447 return false;
2448
2449 if (vmcb->control.asid == 0)
2450 return false;
2451
2452 if (vmcb->control.nested_ctl && !npt_enabled)
2453 return false;
2454
2455 return true;
2456 }
2457
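/*
 * Emulate VMRUN: rAX holds the guest-physical address of the nested VMCB.
 * The current state is saved into hsave, the L1-provided guest state and
 * control fields are loaded into the active VMCB, the L1 intercepts are
 * cached for later exit checks, and GIF is set so the nested guest can run.
 */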
2458 static bool nested_svm_vmrun(struct vcpu_svm *svm)
2459 {
2460 struct vmcb *nested_vmcb;
2461 struct vmcb *hsave = svm->nested.hsave;
2462 struct vmcb *vmcb = svm->vmcb;
2463 struct page *page;
2464 u64 vmcb_gpa;
2465
2466 vmcb_gpa = svm->vmcb->save.rax;
2467
2468 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2469 if (!nested_vmcb)
2470 return false;
2471
2472 if (!nested_vmcb_checks(nested_vmcb)) {
2473 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
2474 nested_vmcb->control.exit_code_hi = 0;
2475 nested_vmcb->control.exit_info_1 = 0;
2476 nested_vmcb->control.exit_info_2 = 0;
2477
2478 nested_svm_unmap(page);
2479
2480 return false;
2481 }
2482
2483 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
2484 nested_vmcb->save.rip,
2485 nested_vmcb->control.int_ctl,
2486 nested_vmcb->control.event_inj,
2487 nested_vmcb->control.nested_ctl);
2488
2489 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
2490 nested_vmcb->control.intercept_cr >> 16,
2491 nested_vmcb->control.intercept_exceptions,
2492 nested_vmcb->control.intercept);
2493
2494 /* Clear internal status */
2495 kvm_clear_exception_queue(&svm->vcpu);
2496 kvm_clear_interrupt_queue(&svm->vcpu);
2497
2498 /*
2499 * Save the old vmcb, so we don't need to pick what we save, but can
2500 * restore everything when a VMEXIT occurs
2501 */
2502 hsave->save.es = vmcb->save.es;
2503 hsave->save.cs = vmcb->save.cs;
2504 hsave->save.ss = vmcb->save.ss;
2505 hsave->save.ds = vmcb->save.ds;
2506 hsave->save.gdtr = vmcb->save.gdtr;
2507 hsave->save.idtr = vmcb->save.idtr;
2508 hsave->save.efer = svm->vcpu.arch.efer;
2509 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
2510 hsave->save.cr4 = svm->vcpu.arch.cr4;
2511 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
2512 hsave->save.rip = kvm_rip_read(&svm->vcpu);
2513 hsave->save.rsp = vmcb->save.rsp;
2514 hsave->save.rax = vmcb->save.rax;
2515 if (npt_enabled)
2516 hsave->save.cr3 = vmcb->save.cr3;
2517 else
2518 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
2519
2520 copy_vmcb_control_area(hsave, vmcb);
2521
2522 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
2523 svm->vcpu.arch.hflags |= HF_HIF_MASK;
2524 else
2525 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
2526
2527 if (nested_vmcb->control.nested_ctl) {
2528 kvm_mmu_unload(&svm->vcpu);
2529 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
2530 nested_svm_init_mmu_context(&svm->vcpu);
2531 }
2532
2533 /* Load the nested guest state */
2534 svm->vmcb->save.es = nested_vmcb->save.es;
2535 svm->vmcb->save.cs = nested_vmcb->save.cs;
2536 svm->vmcb->save.ss = nested_vmcb->save.ss;
2537 svm->vmcb->save.ds = nested_vmcb->save.ds;
2538 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
2539 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
2540 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
2541 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
2542 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
2543 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
2544 if (npt_enabled) {
2545 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
2546 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
2547 } else
2548 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
2549
2550 /* Guest paging mode is active - reset mmu */
2551 kvm_mmu_reset_context(&svm->vcpu);
2552
2553 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
2554 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
2555 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
2556 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
2557
2558 /* In case we don't even reach vcpu_run, the fields are not updated */
2559 svm->vmcb->save.rax = nested_vmcb->save.rax;
2560 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
2561 svm->vmcb->save.rip = nested_vmcb->save.rip;
2562 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
2563 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
2564 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
2565
2566 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
2567 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
2568
2569 /* cache intercepts */
2570 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
2571 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
2572 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
2573 svm->nested.intercept = nested_vmcb->control.intercept;
2574
2575 svm_flush_tlb(&svm->vcpu);
2576 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
2577 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
2578 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
2579 else
2580 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
2581
2582 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
2583 /* We only want the cr8 intercept bits of the guest */
2584 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
2585 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
2586 }
2587
2588 /* We don't want to see VMMCALLs from a nested guest */
2589 clr_intercept(svm, INTERCEPT_VMMCALL);
2590
2591 svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
2592 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
2593 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
2594 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
2595 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
2596 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
2597
2598 nested_svm_unmap(page);
2599
2600 /* Enter Guest-Mode */
2601 enter_guest_mode(&svm->vcpu);
2602
2603 /*
2604 * Merge guest and host intercepts - must be called with vcpu in
2605 * guest-mode to take effect here
2606 */
2607 recalc_intercepts(svm);
2608
2609 svm->nested.vmcb = vmcb_gpa;
2610
2611 enable_gif(svm);
2612
2613 mark_all_dirty(svm->vmcb);
2614
2615 return true;
2616 }
2617
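/*
 * VMLOAD/VMSAVE transfer the state that VMRUN and #VMEXIT do not switch:
 * FS, GS, TR, LDTR plus the SYSCALL/SYSENTER MSR state.  The same helper
 * serves both directions, only the argument order differs.
 */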
2618 static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
2619 {
2620 to_vmcb->save.fs = from_vmcb->save.fs;
2621 to_vmcb->save.gs = from_vmcb->save.gs;
2622 to_vmcb->save.tr = from_vmcb->save.tr;
2623 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
2624 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
2625 to_vmcb->save.star = from_vmcb->save.star;
2626 to_vmcb->save.lstar = from_vmcb->save.lstar;
2627 to_vmcb->save.cstar = from_vmcb->save.cstar;
2628 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
2629 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
2630 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
2631 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
2632 }
2633
2634 static int vmload_interception(struct vcpu_svm *svm)
2635 {
2636 struct vmcb *nested_vmcb;
2637 struct page *page;
2638
2639 if (nested_svm_check_permissions(svm))
2640 return 1;
2641
2642 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2643 if (!nested_vmcb)
2644 return 1;
2645
2646 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2647 skip_emulated_instruction(&svm->vcpu);
2648
2649 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
2650 nested_svm_unmap(page);
2651
2652 return 1;
2653 }
2654
2655 static int vmsave_interception(struct vcpu_svm *svm)
2656 {
2657 struct vmcb *nested_vmcb;
2658 struct page *page;
2659
2660 if (nested_svm_check_permissions(svm))
2661 return 1;
2662
2663 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2664 if (!nested_vmcb)
2665 return 1;
2666
2667 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2668 skip_emulated_instruction(&svm->vcpu);
2669
2670 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
2671 nested_svm_unmap(page);
2672
2673 return 1;
2674 }
2675
2676 static int vmrun_interception(struct vcpu_svm *svm)
2677 {
2678 if (nested_svm_check_permissions(svm))
2679 return 1;
2680
2681 /* Save rip after vmrun instruction */
2682 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
2683
2684 if (!nested_svm_vmrun(svm))
2685 return 1;
2686
2687 if (!nested_svm_vmrun_msrpm(svm))
2688 goto failed;
2689
2690 return 1;
2691
2692 failed:
2693
2694 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
2695 svm->vmcb->control.exit_code_hi = 0;
2696 svm->vmcb->control.exit_info_1 = 0;
2697 svm->vmcb->control.exit_info_2 = 0;
2698
2699 nested_svm_vmexit(svm);
2700
2701 return 1;
2702 }
2703
2704 static int stgi_interception(struct vcpu_svm *svm)
2705 {
2706 if (nested_svm_check_permissions(svm))
2707 return 1;
2708
2709 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2710 skip_emulated_instruction(&svm->vcpu);
2711 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2712
2713 enable_gif(svm);
2714
2715 return 1;
2716 }
2717
2718 static int clgi_interception(struct vcpu_svm *svm)
2719 {
2720 if (nested_svm_check_permissions(svm))
2721 return 1;
2722
2723 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2724 skip_emulated_instruction(&svm->vcpu);
2725
2726 disable_gif(svm);
2727
2728 /* After a CLGI no interrupts should be delivered */
2729 svm_clear_vintr(svm);
2730 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2731
2732 mark_dirty(svm->vmcb, VMCB_INTR);
2733
2734 return 1;
2735 }
2736
2737 static int invlpga_interception(struct vcpu_svm *svm)
2738 {
2739 struct kvm_vcpu *vcpu = &svm->vcpu;
2740
2741 trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
2742 vcpu->arch.regs[VCPU_REGS_RAX]);
2743
2744 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2745 kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
2746
2747 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2748 skip_emulated_instruction(&svm->vcpu);
2749 return 1;
2750 }
2751
2752 static int skinit_interception(struct vcpu_svm *svm)
2753 {
2754 trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
2755
2756 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2757 return 1;
2758 }
2759
2760 static int xsetbv_interception(struct vcpu_svm *svm)
2761 {
2762 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
2763 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
2764
2765 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
2766 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2767 skip_emulated_instruction(&svm->vcpu);
2768 }
2769
2770 return 1;
2771 }
2772
2773 static int task_switch_interception(struct vcpu_svm *svm)
2774 {
2775 u16 tss_selector;
2776 int reason;
2777 int int_type = svm->vmcb->control.exit_int_info &
2778 SVM_EXITINTINFO_TYPE_MASK;
2779 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2780 uint32_t type =
2781 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2782 uint32_t idt_v =
2783 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2784 bool has_error_code = false;
2785 u32 error_code = 0;
2786
2787 tss_selector = (u16)svm->vmcb->control.exit_info_1;
2788
2789 if (svm->vmcb->control.exit_info_2 &
2790 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2791 reason = TASK_SWITCH_IRET;
2792 else if (svm->vmcb->control.exit_info_2 &
2793 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2794 reason = TASK_SWITCH_JMP;
2795 else if (idt_v)
2796 reason = TASK_SWITCH_GATE;
2797 else
2798 reason = TASK_SWITCH_CALL;
2799
2800 if (reason == TASK_SWITCH_GATE) {
2801 switch (type) {
2802 case SVM_EXITINTINFO_TYPE_NMI:
2803 svm->vcpu.arch.nmi_injected = false;
2804 break;
2805 case SVM_EXITINTINFO_TYPE_EXEPT:
2806 if (svm->vmcb->control.exit_info_2 &
2807 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2808 has_error_code = true;
2809 error_code =
2810 (u32)svm->vmcb->control.exit_info_2;
2811 }
2812 kvm_clear_exception_queue(&svm->vcpu);
2813 break;
2814 case SVM_EXITINTINFO_TYPE_INTR:
2815 kvm_clear_interrupt_queue(&svm->vcpu);
2816 break;
2817 default:
2818 break;
2819 }
2820 }
2821
2822 if (reason != TASK_SWITCH_GATE ||
2823 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2824 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2825 (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
2826 skip_emulated_instruction(&svm->vcpu);
2827
2828 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2829 int_vec = -1;
2830
2831 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
2832 has_error_code, error_code) == EMULATE_FAIL) {
2833 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2834 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2835 svm->vcpu.run->internal.ndata = 0;
2836 return 0;
2837 }
2838 return 1;
2839 }
2840
2841 static int cpuid_interception(struct vcpu_svm *svm)
2842 {
2843 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
2844 kvm_emulate_cpuid(&svm->vcpu);
2845 return 1;
2846 }
2847
2848 static int iret_interception(struct vcpu_svm *svm)
2849 {
2850 ++svm->vcpu.stat.nmi_window_exits;
2851 clr_intercept(svm, INTERCEPT_IRET);
2852 svm->vcpu.arch.hflags |= HF_IRET_MASK;
2853 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
2854 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2855 return 1;
2856 }
2857
2858 static int invlpg_interception(struct vcpu_svm *svm)
2859 {
2860 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2861 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
2862
2863 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
2864 skip_emulated_instruction(&svm->vcpu);
2865 return 1;
2866 }
2867
2868 static int emulate_on_interception(struct vcpu_svm *svm)
2869 {
2870 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
2871 }
2872
2873 static int rdpmc_interception(struct vcpu_svm *svm)
2874 {
2875 int err;
2876
2877 if (!static_cpu_has(X86_FEATURE_NRIPS))
2878 return emulate_on_interception(svm);
2879
2880 err = kvm_rdpmc(&svm->vcpu);
2881 kvm_complete_insn_gp(&svm->vcpu, err);
2882
2883 return 1;
2884 }
2885
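/*
 * The selective CR0 write intercept only fires for writes that change
 * bits outside of SVM_CR0_SELECTIVE_MASK.  Mask those bits out of both
 * the old and the new value and forward the exit to the L1 hypervisor
 * only if one of the remaining bits actually changes.
 */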
2886 bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
2887 {
2888 unsigned long cr0 = svm->vcpu.arch.cr0;
2889 bool ret = false;
2890 u64 intercept;
2891
2892 intercept = svm->nested.intercept;
2893
2894 if (!is_guest_mode(&svm->vcpu) ||
2895 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
2896 return false;
2897
2898 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2899 val &= ~SVM_CR0_SELECTIVE_MASK;
2900
2901 if (cr0 ^ val) {
2902 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2903 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2904 }
2905
2906 return ret;
2907 }
2908
2909 #define CR_VALID (1ULL << 63)
2910
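/*
 * With decode assists the hardware reports which GPR a MOV to/from CRn
 * used: exit_info_1 carries the register number and bit 63 (CR_VALID)
 * signals that the information is present.  Without decode assists the
 * access is handled through the instruction emulator instead.
 */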
2911 static int cr_interception(struct vcpu_svm *svm)
2912 {
2913 int reg, cr;
2914 unsigned long val;
2915 int err;
2916
2917 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2918 return emulate_on_interception(svm);
2919
2920 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2921 return emulate_on_interception(svm);
2922
2923 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2924 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2925
2926 err = 0;
2927 if (cr >= 16) { /* mov to cr */
2928 cr -= 16;
2929 val = kvm_register_read(&svm->vcpu, reg);
2930 switch (cr) {
2931 case 0:
2932 if (!check_selective_cr0_intercepted(svm, val))
2933 err = kvm_set_cr0(&svm->vcpu, val);
2934 else
2935 return 1;
2936
2937 break;
2938 case 3:
2939 err = kvm_set_cr3(&svm->vcpu, val);
2940 break;
2941 case 4:
2942 err = kvm_set_cr4(&svm->vcpu, val);
2943 break;
2944 case 8:
2945 err = kvm_set_cr8(&svm->vcpu, val);
2946 break;
2947 default:
2948 WARN(1, "unhandled write to CR%d", cr);
2949 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2950 return 1;
2951 }
2952 } else { /* mov from cr */
2953 switch (cr) {
2954 case 0:
2955 val = kvm_read_cr0(&svm->vcpu);
2956 break;
2957 case 2:
2958 val = svm->vcpu.arch.cr2;
2959 break;
2960 case 3:
2961 val = kvm_read_cr3(&svm->vcpu);
2962 break;
2963 case 4:
2964 val = kvm_read_cr4(&svm->vcpu);
2965 break;
2966 case 8:
2967 val = kvm_get_cr8(&svm->vcpu);
2968 break;
2969 default:
2970 WARN(1, "unhandled read from CR%d", cr);
2971 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2972 return 1;
2973 }
2974 kvm_register_write(&svm->vcpu, reg, val);
2975 }
2976 kvm_complete_insn_gp(&svm->vcpu, err);
2977
2978 return 1;
2979 }
2980
2981 static int dr_interception(struct vcpu_svm *svm)
2982 {
2983 int reg, dr;
2984 unsigned long val;
2985 int err;
2986
2987 if (svm->vcpu.guest_debug == 0) {
2988 /*
2989 * No more DR vmexits; force a reload of the debug registers
2990 * and reenter on this instruction. The next vmexit will
2991 * retrieve the full state of the debug registers.
2992 */
2993 clr_dr_intercepts(svm);
2994 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2995 return 1;
2996 }
2997
2998 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2999 return emulate_on_interception(svm);
3000
3001 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3002 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
3003
3004 if (dr >= 16) { /* mov to DRn */
3005 val = kvm_register_read(&svm->vcpu, reg);
3006 kvm_set_dr(&svm->vcpu, dr - 16, val);
3007 } else {
3008 err = kvm_get_dr(&svm->vcpu, dr, &val);
3009 if (!err)
3010 kvm_register_write(&svm->vcpu, reg, val);
3011 }
3012
3013 skip_emulated_instruction(&svm->vcpu);
3014
3015 return 1;
3016 }
3017
3018 static int cr8_write_interception(struct vcpu_svm *svm)
3019 {
3020 struct kvm_run *kvm_run = svm->vcpu.run;
3021 int r;
3022
3023 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
3024 /* instruction emulation calls kvm_set_cr8() */
3025 r = cr_interception(svm);
3026 if (irqchip_in_kernel(svm->vcpu.kvm))
3027 return r;
3028 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
3029 return r;
3030 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
3031 return 0;
3032 }
3033
3034 u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
3035 {
3036 struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
3037 return vmcb->control.tsc_offset +
3038 svm_scale_tsc(vcpu, host_tsc);
3039 }
3040
3041 static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
3042 {
3043 struct vcpu_svm *svm = to_svm(vcpu);
3044
3045 switch (ecx) {
3046 case MSR_IA32_TSC: {
3047 *data = svm->vmcb->control.tsc_offset +
3048 svm_scale_tsc(vcpu, native_read_tsc());
3049
3050 break;
3051 }
3052 case MSR_STAR:
3053 *data = svm->vmcb->save.star;
3054 break;
3055 #ifdef CONFIG_X86_64
3056 case MSR_LSTAR:
3057 *data = svm->vmcb->save.lstar;
3058 break;
3059 case MSR_CSTAR:
3060 *data = svm->vmcb->save.cstar;
3061 break;
3062 case MSR_KERNEL_GS_BASE:
3063 *data = svm->vmcb->save.kernel_gs_base;
3064 break;
3065 case MSR_SYSCALL_MASK:
3066 *data = svm->vmcb->save.sfmask;
3067 break;
3068 #endif
3069 case MSR_IA32_SYSENTER_CS:
3070 *data = svm->vmcb->save.sysenter_cs;
3071 break;
3072 case MSR_IA32_SYSENTER_EIP:
3073 *data = svm->sysenter_eip;
3074 break;
3075 case MSR_IA32_SYSENTER_ESP:
3076 *data = svm->sysenter_esp;
3077 break;
3078 /*
3079 * Nobody will change the following 5 values in the VMCB so we can
3080 * safely return them on rdmsr. They will always be 0 until LBRV is
3081 * implemented.
3082 */
3083 case MSR_IA32_DEBUGCTLMSR:
3084 *data = svm->vmcb->save.dbgctl;
3085 break;
3086 case MSR_IA32_LASTBRANCHFROMIP:
3087 *data = svm->vmcb->save.br_from;
3088 break;
3089 case MSR_IA32_LASTBRANCHTOIP:
3090 *data = svm->vmcb->save.br_to;
3091 break;
3092 case MSR_IA32_LASTINTFROMIP:
3093 *data = svm->vmcb->save.last_excp_from;
3094 break;
3095 case MSR_IA32_LASTINTTOIP:
3096 *data = svm->vmcb->save.last_excp_to;
3097 break;
3098 case MSR_VM_HSAVE_PA:
3099 *data = svm->nested.hsave_msr;
3100 break;
3101 case MSR_VM_CR:
3102 *data = svm->nested.vm_cr_msr;
3103 break;
3104 case MSR_IA32_UCODE_REV:
3105 *data = 0x01000065;
3106 break;
3107 default:
3108 return kvm_get_msr_common(vcpu, ecx, data);
3109 }
3110 return 0;
3111 }
3112
3113 static int rdmsr_interception(struct vcpu_svm *svm)
3114 {
3115 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
3116 u64 data;
3117
3118 if (svm_get_msr(&svm->vcpu, ecx, &data)) {
3119 trace_kvm_msr_read_ex(ecx);
3120 kvm_inject_gp(&svm->vcpu, 0);
3121 } else {
3122 trace_kvm_msr_read(ecx, data);
3123
3124 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
3125 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
3126 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3127 skip_emulated_instruction(&svm->vcpu);
3128 }
3129 return 1;
3130 }
3131
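/*
 * Writes to MSR_VM_CR: once SVM_DIS is set, the DIS and LOCK bits become
 * read-only, and disabling SVM while EFER.SVME is still set is refused
 * (the non-zero return makes the caller inject a #GP).
 */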
3132 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
3133 {
3134 struct vcpu_svm *svm = to_svm(vcpu);
3135 int svm_dis, chg_mask;
3136
3137 if (data & ~SVM_VM_CR_VALID_MASK)
3138 return 1;
3139
3140 chg_mask = SVM_VM_CR_VALID_MASK;
3141
3142 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
3143 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
3144
3145 svm->nested.vm_cr_msr &= ~chg_mask;
3146 svm->nested.vm_cr_msr |= (data & chg_mask);
3147
3148 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
3149
3150 /* check for svm_disable while efer.svme is set */
3151 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
3152 return 1;
3153
3154 return 0;
3155 }
3156
3157 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
3158 {
3159 struct vcpu_svm *svm = to_svm(vcpu);
3160
3161 u32 ecx = msr->index;
3162 u64 data = msr->data;
3163 switch (ecx) {
3164 case MSR_IA32_TSC:
3165 kvm_write_tsc(vcpu, msr);
3166 break;
3167 case MSR_STAR:
3168 svm->vmcb->save.star = data;
3169 break;
3170 #ifdef CONFIG_X86_64
3171 case MSR_LSTAR:
3172 svm->vmcb->save.lstar = data;
3173 break;
3174 case MSR_CSTAR:
3175 svm->vmcb->save.cstar = data;
3176 break;
3177 case MSR_KERNEL_GS_BASE:
3178 svm->vmcb->save.kernel_gs_base = data;
3179 break;
3180 case MSR_SYSCALL_MASK:
3181 svm->vmcb->save.sfmask = data;
3182 break;
3183 #endif
3184 case MSR_IA32_SYSENTER_CS:
3185 svm->vmcb->save.sysenter_cs = data;
3186 break;
3187 case MSR_IA32_SYSENTER_EIP:
3188 svm->sysenter_eip = data;
3189 svm->vmcb->save.sysenter_eip = data;
3190 break;
3191 case MSR_IA32_SYSENTER_ESP:
3192 svm->sysenter_esp = data;
3193 svm->vmcb->save.sysenter_esp = data;
3194 break;
3195 case MSR_IA32_DEBUGCTLMSR:
3196 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
3197 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
3198 __func__, data);
3199 break;
3200 }
3201 if (data & DEBUGCTL_RESERVED_BITS)
3202 return 1;
3203
3204 svm->vmcb->save.dbgctl = data;
3205 mark_dirty(svm->vmcb, VMCB_LBR);
3206 if (data & (1ULL<<0))
3207 svm_enable_lbrv(svm);
3208 else
3209 svm_disable_lbrv(svm);
3210 break;
3211 case MSR_VM_HSAVE_PA:
3212 svm->nested.hsave_msr = data;
3213 break;
3214 case MSR_VM_CR:
3215 return svm_set_vm_cr(vcpu, data);
3216 case MSR_VM_IGNNE:
3217 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3218 break;
3219 default:
3220 return kvm_set_msr_common(vcpu, msr);
3221 }
3222 return 0;
3223 }
3224
3225 static int wrmsr_interception(struct vcpu_svm *svm)
3226 {
3227 struct msr_data msr;
3228 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
3229 u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
3230 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
3231
3232 msr.data = data;
3233 msr.index = ecx;
3234 msr.host_initiated = false;
3235
3236 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3237 if (svm_set_msr(&svm->vcpu, &msr)) {
3238 trace_kvm_msr_write_ex(ecx, data);
3239 kvm_inject_gp(&svm->vcpu, 0);
3240 } else {
3241 trace_kvm_msr_write(ecx, data);
3242 skip_emulated_instruction(&svm->vcpu);
3243 }
3244 return 1;
3245 }
3246
3247 static int msr_interception(struct vcpu_svm *svm)
3248 {
3249 if (svm->vmcb->control.exit_info_1)
3250 return wrmsr_interception(svm);
3251 else
3252 return rdmsr_interception(svm);
3253 }
3254
3255 static int interrupt_window_interception(struct vcpu_svm *svm)
3256 {
3257 struct kvm_run *kvm_run = svm->vcpu.run;
3258
3259 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3260 svm_clear_vintr(svm);
3261 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3262 mark_dirty(svm->vmcb, VMCB_INTR);
3263 ++svm->vcpu.stat.irq_window_exits;
3264 /*
3265 * If user space is waiting to inject interrupts, exit as soon as
3266 * possible
3267 */
3268 if (!irqchip_in_kernel(svm->vcpu.kvm) &&
3269 kvm_run->request_interrupt_window &&
3270 !kvm_cpu_has_interrupt(&svm->vcpu)) {
3271 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
3272 return 0;
3273 }
3274
3275 return 1;
3276 }
3277
3278 static int pause_interception(struct vcpu_svm *svm)
3279 {
3280 kvm_vcpu_on_spin(&(svm->vcpu));
3281 return 1;
3282 }
3283
3284 static int nop_interception(struct vcpu_svm *svm)
3285 {
3286 skip_emulated_instruction(&(svm->vcpu));
3287 return 1;
3288 }
3289
3290 static int monitor_interception(struct vcpu_svm *svm)
3291 {
3292 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
3293 return nop_interception(svm);
3294 }
3295
3296 static int mwait_interception(struct vcpu_svm *svm)
3297 {
3298 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
3299 return nop_interception(svm);
3300 }
3301
3302 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
3303 [SVM_EXIT_READ_CR0] = cr_interception,
3304 [SVM_EXIT_READ_CR3] = cr_interception,
3305 [SVM_EXIT_READ_CR4] = cr_interception,
3306 [SVM_EXIT_READ_CR8] = cr_interception,
3307 [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
3308 [SVM_EXIT_WRITE_CR0] = cr_interception,
3309 [SVM_EXIT_WRITE_CR3] = cr_interception,
3310 [SVM_EXIT_WRITE_CR4] = cr_interception,
3311 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
3312 [SVM_EXIT_READ_DR0] = dr_interception,
3313 [SVM_EXIT_READ_DR1] = dr_interception,
3314 [SVM_EXIT_READ_DR2] = dr_interception,
3315 [SVM_EXIT_READ_DR3] = dr_interception,
3316 [SVM_EXIT_READ_DR4] = dr_interception,
3317 [SVM_EXIT_READ_DR5] = dr_interception,
3318 [SVM_EXIT_READ_DR6] = dr_interception,
3319 [SVM_EXIT_READ_DR7] = dr_interception,
3320 [SVM_EXIT_WRITE_DR0] = dr_interception,
3321 [SVM_EXIT_WRITE_DR1] = dr_interception,
3322 [SVM_EXIT_WRITE_DR2] = dr_interception,
3323 [SVM_EXIT_WRITE_DR3] = dr_interception,
3324 [SVM_EXIT_WRITE_DR4] = dr_interception,
3325 [SVM_EXIT_WRITE_DR5] = dr_interception,
3326 [SVM_EXIT_WRITE_DR6] = dr_interception,
3327 [SVM_EXIT_WRITE_DR7] = dr_interception,
3328 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
3329 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
3330 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
3331 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
3332 [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
3333 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
3334 [SVM_EXIT_INTR] = intr_interception,
3335 [SVM_EXIT_NMI] = nmi_interception,
3336 [SVM_EXIT_SMI] = nop_on_interception,
3337 [SVM_EXIT_INIT] = nop_on_interception,
3338 [SVM_EXIT_VINTR] = interrupt_window_interception,
3339 [SVM_EXIT_RDPMC] = rdpmc_interception,
3340 [SVM_EXIT_CPUID] = cpuid_interception,
3341 [SVM_EXIT_IRET] = iret_interception,
3342 [SVM_EXIT_INVD] = emulate_on_interception,
3343 [SVM_EXIT_PAUSE] = pause_interception,
3344 [SVM_EXIT_HLT] = halt_interception,
3345 [SVM_EXIT_INVLPG] = invlpg_interception,
3346 [SVM_EXIT_INVLPGA] = invlpga_interception,
3347 [SVM_EXIT_IOIO] = io_interception,
3348 [SVM_EXIT_MSR] = msr_interception,
3349 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
3350 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
3351 [SVM_EXIT_VMRUN] = vmrun_interception,
3352 [SVM_EXIT_VMMCALL] = vmmcall_interception,
3353 [SVM_EXIT_VMLOAD] = vmload_interception,
3354 [SVM_EXIT_VMSAVE] = vmsave_interception,
3355 [SVM_EXIT_STGI] = stgi_interception,
3356 [SVM_EXIT_CLGI] = clgi_interception,
3357 [SVM_EXIT_SKINIT] = skinit_interception,
3358 [SVM_EXIT_WBINVD] = emulate_on_interception,
3359 [SVM_EXIT_MONITOR] = monitor_interception,
3360 [SVM_EXIT_MWAIT] = mwait_interception,
3361 [SVM_EXIT_XSETBV] = xsetbv_interception,
3362 [SVM_EXIT_NPF] = pf_interception,
3363 };
3364
3365 static void dump_vmcb(struct kvm_vcpu *vcpu)
3366 {
3367 struct vcpu_svm *svm = to_svm(vcpu);
3368 struct vmcb_control_area *control = &svm->vmcb->control;
3369 struct vmcb_save_area *save = &svm->vmcb->save;
3370
3371 pr_err("VMCB Control Area:\n");
3372 pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
3373 pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
3374 pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
3375 pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
3376 pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
3377 pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
3378 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
3379 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
3380 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
3381 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
3382 pr_err("%-20s%d\n", "asid:", control->asid);
3383 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
3384 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
3385 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
3386 pr_err("%-20s%08x\n", "int_state:", control->int_state);
3387 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
3388 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
3389 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
3390 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
3391 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
3392 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
3393 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
3394 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
3395 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
3396 pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
3397 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
3398 pr_err("VMCB State Save Area:\n");
3399 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3400 "es:",
3401 save->es.selector, save->es.attrib,
3402 save->es.limit, save->es.base);
3403 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3404 "cs:",
3405 save->cs.selector, save->cs.attrib,
3406 save->cs.limit, save->cs.base);
3407 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3408 "ss:",
3409 save->ss.selector, save->ss.attrib,
3410 save->ss.limit, save->ss.base);
3411 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3412 "ds:",
3413 save->ds.selector, save->ds.attrib,
3414 save->ds.limit, save->ds.base);
3415 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3416 "fs:",
3417 save->fs.selector, save->fs.attrib,
3418 save->fs.limit, save->fs.base);
3419 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3420 "gs:",
3421 save->gs.selector, save->gs.attrib,
3422 save->gs.limit, save->gs.base);
3423 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3424 "gdtr:",
3425 save->gdtr.selector, save->gdtr.attrib,
3426 save->gdtr.limit, save->gdtr.base);
3427 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3428 "ldtr:",
3429 save->ldtr.selector, save->ldtr.attrib,
3430 save->ldtr.limit, save->ldtr.base);
3431 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3432 "idtr:",
3433 save->idtr.selector, save->idtr.attrib,
3434 save->idtr.limit, save->idtr.base);
3435 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3436 "tr:",
3437 save->tr.selector, save->tr.attrib,
3438 save->tr.limit, save->tr.base);
3439 pr_err("cpl: %d efer: %016llx\n",
3440 save->cpl, save->efer);
3441 pr_err("%-15s %016llx %-13s %016llx\n",
3442 "cr0:", save->cr0, "cr2:", save->cr2);
3443 pr_err("%-15s %016llx %-13s %016llx\n",
3444 "cr3:", save->cr3, "cr4:", save->cr4);
3445 pr_err("%-15s %016llx %-13s %016llx\n",
3446 "dr6:", save->dr6, "dr7:", save->dr7);
3447 pr_err("%-15s %016llx %-13s %016llx\n",
3448 "rip:", save->rip, "rflags:", save->rflags);
3449 pr_err("%-15s %016llx %-13s %016llx\n",
3450 "rsp:", save->rsp, "rax:", save->rax);
3451 pr_err("%-15s %016llx %-13s %016llx\n",
3452 "star:", save->star, "lstar:", save->lstar);
3453 pr_err("%-15s %016llx %-13s %016llx\n",
3454 "cstar:", save->cstar, "sfmask:", save->sfmask);
3455 pr_err("%-15s %016llx %-13s %016llx\n",
3456 "kernel_gs_base:", save->kernel_gs_base,
3457 "sysenter_cs:", save->sysenter_cs);
3458 pr_err("%-15s %016llx %-13s %016llx\n",
3459 "sysenter_esp:", save->sysenter_esp,
3460 "sysenter_eip:", save->sysenter_eip);
3461 pr_err("%-15s %016llx %-13s %016llx\n",
3462 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3463 pr_err("%-15s %016llx %-13s %016llx\n",
3464 "br_from:", save->br_from, "br_to:", save->br_to);
3465 pr_err("%-15s %016llx %-13s %016llx\n",
3466 "excp_from:", save->last_excp_from,
3467 "excp_to:", save->last_excp_to);
3468 }
3469
3470 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
3471 {
3472 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3473
3474 *info1 = control->exit_info_1;
3475 *info2 = control->exit_info_2;
3476 }
3477
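/*
 * Top-level exit dispatcher.  Exits of a nested guest are first offered
 * to the L1 hypervisor via nested_svm_exit_special()/exit_handled();
 * whatever remains is routed through the svm_exit_handlers table indexed
 * by the hardware exit code.
 */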
3478 static int handle_exit(struct kvm_vcpu *vcpu)
3479 {
3480 struct vcpu_svm *svm = to_svm(vcpu);
3481 struct kvm_run *kvm_run = vcpu->run;
3482 u32 exit_code = svm->vmcb->control.exit_code;
3483
3484 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
3485 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3486 if (npt_enabled)
3487 vcpu->arch.cr3 = svm->vmcb->save.cr3;
3488
3489 if (unlikely(svm->nested.exit_required)) {
3490 nested_svm_vmexit(svm);
3491 svm->nested.exit_required = false;
3492
3493 return 1;
3494 }
3495
3496 if (is_guest_mode(vcpu)) {
3497 int vmexit;
3498
3499 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
3500 svm->vmcb->control.exit_info_1,
3501 svm->vmcb->control.exit_info_2,
3502 svm->vmcb->control.exit_int_info,
3503 svm->vmcb->control.exit_int_info_err,
3504 KVM_ISA_SVM);
3505
3506 vmexit = nested_svm_exit_special(svm);
3507
3508 if (vmexit == NESTED_EXIT_CONTINUE)
3509 vmexit = nested_svm_exit_handled(svm);
3510
3511 if (vmexit == NESTED_EXIT_DONE)
3512 return 1;
3513 }
3514
3515 svm_complete_interrupts(svm);
3516
3517 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3518 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3519 kvm_run->fail_entry.hardware_entry_failure_reason
3520 = svm->vmcb->control.exit_code;
3521 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
3522 dump_vmcb(vcpu);
3523 return 0;
3524 }
3525
3526 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
3527 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
3528 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
3529 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
3530 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
3531 "exit_code 0x%x\n",
3532 __func__, svm->vmcb->control.exit_int_info,
3533 exit_code);
3534
3535 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
3536 || !svm_exit_handlers[exit_code]) {
3537 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
3538 kvm_run->hw.hardware_exit_reason = exit_code;
3539 return 0;
3540 }
3541
3542 return svm_exit_handlers[exit_code](svm);
3543 }
3544
3545 static void reload_tss(struct kvm_vcpu *vcpu)
3546 {
3547 int cpu = raw_smp_processor_id();
3548
3549 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
3550 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
3551 load_TR_desc();
3552 }
3553
3554 static void pre_svm_run(struct vcpu_svm *svm)
3555 {
3556 int cpu = raw_smp_processor_id();
3557
3558 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
3559
3560 /* FIXME: handle wraparound of asid_generation */
3561 if (svm->asid_generation != sd->asid_generation)
3562 new_asid(svm, sd);
3563 }
3564
3565 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3566 {
3567 struct vcpu_svm *svm = to_svm(vcpu);
3568
3569 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3570 vcpu->arch.hflags |= HF_NMI_MASK;
3571 set_intercept(svm, INTERCEPT_IRET);
3572 ++vcpu->stat.nmi_injections;
3573 }
3574
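/*
 * Request a virtual interrupt (V_IRQ) in the VMCB.  The priority field is
 * hard-coded to the highest value (0xf) rather than derived from the
 * vector (see the commented-out expression); this path only opens an
 * interrupt window, the actual vector is injected through event_inj in
 * svm_set_irq().
 */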
3575 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
3576 {
3577 struct vmcb_control_area *control;
3578
3579 control = &svm->vmcb->control;
3580 control->int_vector = irq;
3581 control->int_ctl &= ~V_INTR_PRIO_MASK;
3582 control->int_ctl |= V_IRQ_MASK |
3583 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
3584 mark_dirty(svm->vmcb, VMCB_INTR);
3585 }
3586
3587 static void svm_set_irq(struct kvm_vcpu *vcpu)
3588 {
3589 struct vcpu_svm *svm = to_svm(vcpu);
3590
3591 BUG_ON(!(gif_set(svm)));
3592
3593 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
3594 ++vcpu->stat.irq_injections;
3595
3596 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3597 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
3598 }
3599
3600 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3601 {
3602 struct vcpu_svm *svm = to_svm(vcpu);
3603
3604 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3605 return;
3606
3607 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3608
3609 if (irr == -1)
3610 return;
3611
3612 if (tpr >= irr)
3613 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3614 }
3615
3616 static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
3617 {
3618 return;
3619 }
3620
3621 static int svm_vm_has_apicv(struct kvm *kvm)
3622 {
3623 return 0;
3624 }
3625
3626 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
3627 {
3628 return;
3629 }
3630
3631 static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
3632 {
3633 return;
3634 }
3635
3636 static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
3637 {
3638 return;
3639 }
3640
3641 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
3642 {
3643 struct vcpu_svm *svm = to_svm(vcpu);
3644 struct vmcb *vmcb = svm->vmcb;
3645 int ret;
3646 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
3647 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
3648 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
3649
3650 return ret;
3651 }
3652
3653 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3654 {
3655 struct vcpu_svm *svm = to_svm(vcpu);
3656
3657 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
3658 }
3659
3660 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3661 {
3662 struct vcpu_svm *svm = to_svm(vcpu);
3663
3664 if (masked) {
3665 svm->vcpu.arch.hflags |= HF_NMI_MASK;
3666 set_intercept(svm, INTERCEPT_IRET);
3667 } else {
3668 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
3669 clr_intercept(svm, INTERCEPT_IRET);
3670 }
3671 }
3672
3673 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
3674 {
3675 struct vcpu_svm *svm = to_svm(vcpu);
3676 struct vmcb *vmcb = svm->vmcb;
3677 int ret;
3678
3679 if (!gif_set(svm) ||
3680 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
3681 return 0;
3682
3683 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
3684
3685 if (is_guest_mode(vcpu))
3686 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
3687
3688 return ret;
3689 }
3690
3691 static void enable_irq_window(struct kvm_vcpu *vcpu)
3692 {
3693 struct vcpu_svm *svm = to_svm(vcpu);
3694
3695 /*
3696 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
3697 * 1, because that's a separate STGI/VMRUN intercept. The next time we
3698 * get that intercept, this function will be called again though and
3699 * we'll get the vintr intercept.
3700 */
3701 if (gif_set(svm) && nested_svm_intr(svm)) {
3702 svm_set_vintr(svm);
3703 svm_inject_irq(svm, 0x0);
3704 }
3705 }
3706
3707 static void enable_nmi_window(struct kvm_vcpu *vcpu)
3708 {
3709 struct vcpu_svm *svm = to_svm(vcpu);
3710
3711 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
3712 == HF_NMI_MASK)
3713 return; /* IRET will cause a vm exit */
3714
3715 /*
3716 * Something prevents NMI from being injected. Single step over the
3717 * problem (IRET or exception injection or interrupt shadow)
3718 */
3719 svm->nmi_singlestep = true;
3720 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3721 update_db_bp_intercept(vcpu);
3722 }
3723
3724 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
3725 {
3726 return 0;
3727 }
3728
3729 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
3730 {
3731 struct vcpu_svm *svm = to_svm(vcpu);
3732
3733 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3734 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3735 else
3736 svm->asid_generation--;
3737 }
3738
3739 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
3740 {
3741 }
3742
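/*
 * Keep CR8 and the virtual TPR coherent in both directions: after a guest
 * run, sync_cr8_to_lapic() copies V_TPR back into the lapic; before entry,
 * sync_lapic_to_cr8() mirrors the lapic TPR into V_TPR.  Both are skipped
 * while a nested guest runs with V_INTR_MASKING in effect.
 */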
3743 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3744 {
3745 struct vcpu_svm *svm = to_svm(vcpu);
3746
3747 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3748 return;
3749
3750 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
3751 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
3752 kvm_set_cr8(vcpu, cr8);
3753 }
3754 }
3755
3756 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3757 {
3758 struct vcpu_svm *svm = to_svm(vcpu);
3759 u64 cr8;
3760
3761 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3762 return;
3763
3764 cr8 = kvm_get_cr8(vcpu);
3765 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3766 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3767 }
3768
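/*
 * exit_int_info describes an event whose delivery was cut short by the
 * #VMEXIT.  Re-queue it so it is retried on the next entry: interrupts
 * and NMIs are re-injected as such, hardware exceptions are re-queued
 * with their error code, while software exceptions (e.g. INT3) are
 * dropped and the instruction is re-executed, rewinding RIP if the INT3
 * injection had been emulated.
 */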
3769 static void svm_complete_interrupts(struct vcpu_svm *svm)
3770 {
3771 u8 vector;
3772 int type;
3773 u32 exitintinfo = svm->vmcb->control.exit_int_info;
3774 unsigned int3_injected = svm->int3_injected;
3775
3776 svm->int3_injected = 0;
3777
3778 /*
3779 * If we've made progress since setting HF_IRET_MASK, we've
3780 * executed an IRET and can allow NMI injection.
3781 */
3782 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
3783 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
3784 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
3785 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3786 }
3787
3788 svm->vcpu.arch.nmi_injected = false;
3789 kvm_clear_exception_queue(&svm->vcpu);
3790 kvm_clear_interrupt_queue(&svm->vcpu);
3791
3792 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3793 return;
3794
3795 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3796
3797 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3798 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3799
3800 switch (type) {
3801 case SVM_EXITINTINFO_TYPE_NMI:
3802 svm->vcpu.arch.nmi_injected = true;
3803 break;
3804 case SVM_EXITINTINFO_TYPE_EXEPT:
3805 /*
3806 * In case of software exceptions, do not reinject the vector,
3807 * but re-execute the instruction instead. Rewind RIP first
3808 * if we emulated INT3 before.
3809 */
3810 if (kvm_exception_is_soft(vector)) {
3811 if (vector == BP_VECTOR && int3_injected &&
3812 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
3813 kvm_rip_write(&svm->vcpu,
3814 kvm_rip_read(&svm->vcpu) -
3815 int3_injected);
3816 break;
3817 }
3818 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3819 u32 err = svm->vmcb->control.exit_int_info_err;
3820 kvm_requeue_exception_e(&svm->vcpu, vector, err);
3821
3822 } else
3823 kvm_requeue_exception(&svm->vcpu, vector);
3824 break;
3825 case SVM_EXITINTINFO_TYPE_INTR:
3826 kvm_queue_interrupt(&svm->vcpu, vector, false);
3827 break;
3828 default:
3829 break;
3830 }
3831 }
3832
3833 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3834 {
3835 struct vcpu_svm *svm = to_svm(vcpu);
3836 struct vmcb_control_area *control = &svm->vmcb->control;
3837
3838 control->exit_int_info = control->event_inj;
3839 control->exit_int_info_err = control->event_inj_err;
3840 control->event_inj = 0;
3841 svm_complete_interrupts(svm);
3842 }
3843
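/*
 * The world switch: clgi() clears the global interrupt flag so physical
 * interrupts stay pending while host IRQs are enabled around
 * VMLOAD/VMRUN/VMSAVE, and stgi() afterwards lets any pending interrupt
 * or NMI be taken on the host.  Guest GPRs that the VMCB does not hold
 * are moved in and out by the inline assembly.
 */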
3844 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
3845 {
3846 struct vcpu_svm *svm = to_svm(vcpu);
3847
3848 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3849 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3850 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3851
3852 /*
3853 * A vmexit emulation is required before the vcpu can be executed
3854 * again.
3855 */
3856 if (unlikely(svm->nested.exit_required))
3857 return;
3858
3859 pre_svm_run(svm);
3860
3861 sync_lapic_to_cr8(vcpu);
3862
3863 svm->vmcb->save.cr2 = vcpu->arch.cr2;
3864
3865 clgi();
3866
3867 local_irq_enable();
3868
3869 asm volatile (
3870 "push %%" _ASM_BP "; \n\t"
3871 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
3872 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
3873 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
3874 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
3875 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
3876 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
3877 #ifdef CONFIG_X86_64
3878 "mov %c[r8](%[svm]), %%r8 \n\t"
3879 "mov %c[r9](%[svm]), %%r9 \n\t"
3880 "mov %c[r10](%[svm]), %%r10 \n\t"
3881 "mov %c[r11](%[svm]), %%r11 \n\t"
3882 "mov %c[r12](%[svm]), %%r12 \n\t"
3883 "mov %c[r13](%[svm]), %%r13 \n\t"
3884 "mov %c[r14](%[svm]), %%r14 \n\t"
3885 "mov %c[r15](%[svm]), %%r15 \n\t"
3886 #endif
3887
3888 /* Enter guest mode */
3889 "push %%" _ASM_AX " \n\t"
3890 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
3891 __ex(SVM_VMLOAD) "\n\t"
3892 __ex(SVM_VMRUN) "\n\t"
3893 __ex(SVM_VMSAVE) "\n\t"
3894 "pop %%" _ASM_AX " \n\t"
3895
3896 /* Save guest registers, load host registers */
3897 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
3898 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
3899 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
3900 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
3901 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
3902 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
3903 #ifdef CONFIG_X86_64
3904 "mov %%r8, %c[r8](%[svm]) \n\t"
3905 "mov %%r9, %c[r9](%[svm]) \n\t"
3906 "mov %%r10, %c[r10](%[svm]) \n\t"
3907 "mov %%r11, %c[r11](%[svm]) \n\t"
3908 "mov %%r12, %c[r12](%[svm]) \n\t"
3909 "mov %%r13, %c[r13](%[svm]) \n\t"
3910 "mov %%r14, %c[r14](%[svm]) \n\t"
3911 "mov %%r15, %c[r15](%[svm]) \n\t"
3912 #endif
3913 "pop %%" _ASM_BP
3914 :
3915 : [svm]"a"(svm),
3916 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
3917 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
3918 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
3919 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
3920 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
3921 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
3922 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
3923 #ifdef CONFIG_X86_64
3924 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
3925 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
3926 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
3927 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
3928 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
3929 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
3930 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
3931 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
3932 #endif
3933 : "cc", "memory"
3934 #ifdef CONFIG_X86_64
3935 , "rbx", "rcx", "rdx", "rsi", "rdi"
3936 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
3937 #else
3938 , "ebx", "ecx", "edx", "esi", "edi"
3939 #endif
3940 );
3941
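/*
 * The guest's FS/GS state loaded via VMLOAD (and possibly changed by the
 * guest) is still live in the CPU after the exit; restore the host's GS
 * base, which holds the per-cpu area on 64-bit, and the fs/gs selectors
 * on 32-bit, before any per-cpu access.
 */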
3942 #ifdef CONFIG_X86_64
3943 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
3944 #else
3945 loadsegment(fs, svm->host.fs);
3946 #ifndef CONFIG_X86_32_LAZY_GS
3947 loadsegment(gs, svm->host.gs);
3948 #endif
3949 #endif
3950
3951 reload_tss(vcpu);
3952
3953 local_irq_disable();
3954
3955 vcpu->arch.cr2 = svm->vmcb->save.cr2;
3956 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3957 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3958 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3959
3960 trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
3961
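/*
 * If the exit was caused by an NMI, the NMI itself is still pending: GIF
 * is clear, so it is only delivered once stgi() runs below.  The
 * before/after brackets let the host NMI code (e.g. perf) know it may be
 * handling an NMI that arrived while a guest was running.
 */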
3962 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3963 kvm_before_handle_nmi(&svm->vcpu);
3964
3965 stgi();
3966
3967 /* Any pending NMI will happen here */
3968
3969 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3970 kvm_after_handle_nmi(&svm->vcpu);
3971
3972 sync_cr8_to_lapic(vcpu);
3973
3974 svm->next_rip = 0;
3975
3976 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
3977
3978 /* If the exit was due to a #PF, check for an async page fault. */
3979 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
3980 svm->apf_reason = kvm_read_and_reset_pf_reason();
3981
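/*
 * With nested paging the guest manages its own page tables, so any
 * PDPTEs cached by KVM may be stale; force them to be re-read from
 * guest memory the next time they are needed.
 */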
3982 if (npt_enabled) {
3983 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
3984 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
3985 }
3986
3987 /*
3988 * We need to handle MC intercepts here before the vcpu has a chance to
3989 * change the physical cpu
3990 */
3991 if (unlikely(svm->vmcb->control.exit_code ==
3992 SVM_EXIT_EXCP_BASE + MC_VECTOR))
3993 svm_handle_mce(svm);
3994
3995 mark_all_clean(svm->vmcb);
3996 }
3997
3998 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3999 {
4000 struct vcpu_svm *svm = to_svm(vcpu);
4001
4002 svm->vmcb->save.cr3 = root;
4003 mark_dirty(svm->vmcb, VMCB_CR);
4004 svm_flush_tlb(vcpu);
4005 }
4006
4007 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
4008 {
4009 struct vcpu_svm *svm = to_svm(vcpu);
4010
4011 svm->vmcb->control.nested_cr3 = root;
4012 mark_dirty(svm->vmcb, VMCB_NPT);
4013
4014 /* Also sync guest cr3 here in case we live migrate */
4015 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
4016 mark_dirty(svm->vmcb, VMCB_CR);
4017
4018 svm_flush_tlb(vcpu);
4019 }
4020
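/*
 * Firmware can lock SVM off via the SVMDIS bit in the VM_CR MSR; report
 * that here so kvm_init() refuses to load when the BIOS has disabled SVM.
 */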
4021 static int is_disabled(void)
4022 {
4023 u64 vm_cr;
4024
4025 rdmsrl(MSR_VM_CR, vm_cr);
4026 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
4027 return 1;
4028
4029 return 0;
4030 }
4031
4032 static void
4033 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4034 {
4035 /*
4036 * Patch in the VMMCALL instruction (opcode bytes 0f 01 d9):
4037 */
4038 hypercall[0] = 0x0f;
4039 hypercall[1] = 0x01;
4040 hypercall[2] = 0xd9;
4041 }
4042
4043 static void svm_check_processor_compat(void *rtn)
4044 {
4045 *(int *)rtn = 0;
4046 }
4047
4048 static bool svm_cpu_has_accelerated_tpr(void)
4049 {
4050 return false;
4051 }
4052
4053 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
4054 {
4055 return 0;
4056 }
4057
4058 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
4059 {
4060 }
4061
4062 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
4063 {
4064 switch (func) {
4065 case 0x80000001:
4066 if (nested)
4067 entry->ecx |= (1 << 2); /* Set SVM bit */
4068 break;
4069 case 0x8000000A:
4070 entry->eax = 1; /* SVM revision 1 */
4071 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
4072 ASID emulation to nested SVM */
4073 entry->ecx = 0; /* Reserved */
4074 entry->edx = 0; /* By default do not support any
4075 additional features */
4076
4077 /* Support next_rip if host supports it */
4078 if (boot_cpu_has(X86_FEATURE_NRIPS))
4079 entry->edx |= SVM_FEATURE_NRIP;
4080
4081 /* Support NPT for the guest if enabled */
4082 if (npt_enabled)
4083 entry->edx |= SVM_FEATURE_NPT;
4084
4085 break;
4086 }
4087 }
4088
4089 static int svm_get_lpage_level(void)
4090 {
4091 return PT_PDPE_LEVEL;
4092 }
4093
4094 static bool svm_rdtscp_supported(void)
4095 {
4096 return false;
4097 }
4098
4099 static bool svm_invpcid_supported(void)
4100 {
4101 return false;
4102 }
4103
4104 static bool svm_mpx_supported(void)
4105 {
4106 return false;
4107 }
4108
4109 static bool svm_has_wbinvd_exit(void)
4110 {
4111 return true;
4112 }
4113
4114 static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
4115 {
4116 struct vcpu_svm *svm = to_svm(vcpu);
4117
4118 set_exception_intercept(svm, NM_VECTOR);
4119 update_cr0_intercept(svm);
4120 }
4121
4122 #define PRE_EX(exit) { .exit_code = (exit), \
4123 .stage = X86_ICPT_PRE_EXCEPT, }
4124 #define POST_EX(exit) { .exit_code = (exit), \
4125 .stage = X86_ICPT_POST_EXCEPT, }
4126 #define POST_MEM(exit) { .exit_code = (exit), \
4127 .stage = X86_ICPT_POST_MEMACCESS, }
4128
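/*
 * For instructions emulated on behalf of a nested guest, this table maps
 * the emulator's intercept codes to the #VMEXIT code the equivalent
 * hardware intercept would produce, together with the emulation stage at
 * which that intercept has to be checked against L1's intercept settings.
 */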
4129 static const struct __x86_intercept {
4130 u32 exit_code;
4131 enum x86_intercept_stage stage;
4132 } x86_intercept_map[] = {
4133 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
4134 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
4135 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
4136 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
4137 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
4138 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
4139 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
4140 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
4141 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
4142 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
4143 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
4144 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
4145 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
4146 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
4147 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
4148 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
4149 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
4150 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
4151 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
4152 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
4153 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
4154 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
4155 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
4156 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
4157 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
4158 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
4159 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
4160 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
4161 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
4162 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
4163 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
4164 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
4165 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
4166 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
4167 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
4168 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
4169 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
4170 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
4171 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
4172 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
4173 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
4174 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
4175 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
4176 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
4177 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
4178 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
4179 };
4180
4181 #undef PRE_EX
4182 #undef POST_EX
4183 #undef POST_MEM
4184
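/*
 * Called by the instruction emulator when emulating on behalf of a nested
 * (L2) guest: reconstruct the exit information the corresponding hardware
 * intercept would have written into the VMCB, then let
 * nested_svm_exit_handled() decide whether L1 wants a vmexit.
 */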
4185 static int svm_check_intercept(struct kvm_vcpu *vcpu,
4186 struct x86_instruction_info *info,
4187 enum x86_intercept_stage stage)
4188 {
4189 struct vcpu_svm *svm = to_svm(vcpu);
4190 int vmexit, ret = X86EMUL_CONTINUE;
4191 struct __x86_intercept icpt_info;
4192 struct vmcb *vmcb = svm->vmcb;
4193
4194 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
4195 goto out;
4196
4197 icpt_info = x86_intercept_map[info->intercept];
4198
4199 if (stage != icpt_info.stage)
4200 goto out;
4201
4202 switch (icpt_info.exit_code) {
4203 case SVM_EXIT_READ_CR0:
4204 if (info->intercept == x86_intercept_cr_read)
4205 icpt_info.exit_code += info->modrm_reg;
4206 break;
4207 case SVM_EXIT_WRITE_CR0: {
4208 unsigned long cr0, val;
4209 u64 intercept;
4210
4211 if (info->intercept == x86_intercept_cr_write)
4212 icpt_info.exit_code += info->modrm_reg;
4213
4214 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
4215 break;
4216
4217 intercept = svm->nested.intercept;
4218
4219 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
4220 break;
4221
4222 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
4223 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
4224
4225 if (info->intercept == x86_intercept_lmsw) {
4226 cr0 &= 0xfUL;
4227 val &= 0xfUL;
4228 /* lmsw can't clear PE - catch this here */
4229 if (cr0 & X86_CR0_PE)
4230 val |= X86_CR0_PE;
4231 }
4232
4233 if (cr0 ^ val)
4234 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4235
4236 break;
4237 }
4238 case SVM_EXIT_READ_DR0:
4239 case SVM_EXIT_WRITE_DR0:
4240 icpt_info.exit_code += info->modrm_reg;
4241 break;
4242 case SVM_EXIT_MSR:
4243 if (info->intercept == x86_intercept_wrmsr)
4244 vmcb->control.exit_info_1 = 1;
4245 else
4246 vmcb->control.exit_info_1 = 0;
4247 break;
4248 case SVM_EXIT_PAUSE:
4249 /*
4250 * We only get this for NOP; PAUSE is REP NOP, so check
4251 * the REP prefix here.
4252 */
4253 if (info->rep_prefix != REPE_PREFIX)
4254 goto out;
break;
4255 case SVM_EXIT_IOIO: {
4256 u64 exit_info;
4257 u32 bytes;
4258
4259 exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;
4260
4261 if (info->intercept == x86_intercept_in ||
4262 info->intercept == x86_intercept_ins) {
4263 exit_info |= SVM_IOIO_TYPE_MASK;
4264 bytes = info->src_bytes;
4265 } else {
4266 bytes = info->dst_bytes;
4267 }
4268
4269 if (info->intercept == x86_intercept_outs ||
4270 info->intercept == x86_intercept_ins)
4271 exit_info |= SVM_IOIO_STR_MASK;
4272
4273 if (info->rep_prefix)
4274 exit_info |= SVM_IOIO_REP_MASK;
4275
4276 bytes = min(bytes, 4u);
4277
4278 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
4279
4280 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
4281
4282 vmcb->control.exit_info_1 = exit_info;
4283 vmcb->control.exit_info_2 = info->next_rip;
4284
4285 break;
4286 }
4287 default:
4288 break;
4289 }
4290
4291 vmcb->control.next_rip = info->next_rip;
4292 vmcb->control.exit_code = icpt_info.exit_code;
4293 vmexit = nested_svm_exit_handled(svm);
4294
4295 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
4296 : X86EMUL_CONTINUE;
4297
4298 out:
4299 return ret;
4300 }
4301
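/*
 * By the time this runs, stgi() has already been executed in
 * svm_vcpu_run(), so the interrupt that forced the exit is still pending;
 * re-enabling interrupts is all that is needed for the host to take it.
 */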
4302 static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
4303 {
4304 local_irq_enable();
4305 }
4306
4307 static struct kvm_x86_ops svm_x86_ops = {
4308 .cpu_has_kvm_support = has_svm,
4309 .disabled_by_bios = is_disabled,
4310 .hardware_setup = svm_hardware_setup,
4311 .hardware_unsetup = svm_hardware_unsetup,
4312 .check_processor_compatibility = svm_check_processor_compat,
4313 .hardware_enable = svm_hardware_enable,
4314 .hardware_disable = svm_hardware_disable,
4315 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
4316
4317 .vcpu_create = svm_create_vcpu,
4318 .vcpu_free = svm_free_vcpu,
4319 .vcpu_reset = svm_vcpu_reset,
4320
4321 .prepare_guest_switch = svm_prepare_guest_switch,
4322 .vcpu_load = svm_vcpu_load,
4323 .vcpu_put = svm_vcpu_put,
4324
4325 .update_db_bp_intercept = update_db_bp_intercept,
4326 .get_msr = svm_get_msr,
4327 .set_msr = svm_set_msr,
4328 .get_segment_base = svm_get_segment_base,
4329 .get_segment = svm_get_segment,
4330 .set_segment = svm_set_segment,
4331 .get_cpl = svm_get_cpl,
4332 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
4333 .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
4334 .decache_cr3 = svm_decache_cr3,
4335 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
4336 .set_cr0 = svm_set_cr0,
4337 .set_cr3 = svm_set_cr3,
4338 .set_cr4 = svm_set_cr4,
4339 .set_efer = svm_set_efer,
4340 .get_idt = svm_get_idt,
4341 .set_idt = svm_set_idt,
4342 .get_gdt = svm_get_gdt,
4343 .set_gdt = svm_set_gdt,
4344 .get_dr6 = svm_get_dr6,
4345 .set_dr6 = svm_set_dr6,
4346 .set_dr7 = svm_set_dr7,
4347 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
4348 .cache_reg = svm_cache_reg,
4349 .get_rflags = svm_get_rflags,
4350 .set_rflags = svm_set_rflags,
4351 .fpu_activate = svm_fpu_activate,
4352 .fpu_deactivate = svm_fpu_deactivate,
4353
4354 .tlb_flush = svm_flush_tlb,
4355
4356 .run = svm_vcpu_run,
4357 .handle_exit = handle_exit,
4358 .skip_emulated_instruction = skip_emulated_instruction,
4359 .set_interrupt_shadow = svm_set_interrupt_shadow,
4360 .get_interrupt_shadow = svm_get_interrupt_shadow,
4361 .patch_hypercall = svm_patch_hypercall,
4362 .set_irq = svm_set_irq,
4363 .set_nmi = svm_inject_nmi,
4364 .queue_exception = svm_queue_exception,
4365 .cancel_injection = svm_cancel_injection,
4366 .interrupt_allowed = svm_interrupt_allowed,
4367 .nmi_allowed = svm_nmi_allowed,
4368 .get_nmi_mask = svm_get_nmi_mask,
4369 .set_nmi_mask = svm_set_nmi_mask,
4370 .enable_nmi_window = enable_nmi_window,
4371 .enable_irq_window = enable_irq_window,
4372 .update_cr8_intercept = update_cr8_intercept,
4373 .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
4374 .vm_has_apicv = svm_vm_has_apicv,
4375 .load_eoi_exitmap = svm_load_eoi_exitmap,
4376 .hwapic_isr_update = svm_hwapic_isr_update,
4377 .sync_pir_to_irr = svm_sync_pir_to_irr,
4378
4379 .set_tss_addr = svm_set_tss_addr,
4380 .get_tdp_level = get_npt_level,
4381 .get_mt_mask = svm_get_mt_mask,
4382
4383 .get_exit_info = svm_get_exit_info,
4384
4385 .get_lpage_level = svm_get_lpage_level,
4386
4387 .cpuid_update = svm_cpuid_update,
4388
4389 .rdtscp_supported = svm_rdtscp_supported,
4390 .invpcid_supported = svm_invpcid_supported,
4391 .mpx_supported = svm_mpx_supported,
4392
4393 .set_supported_cpuid = svm_set_supported_cpuid,
4394
4395 .has_wbinvd_exit = svm_has_wbinvd_exit,
4396
4397 .set_tsc_khz = svm_set_tsc_khz,
4398 .read_tsc_offset = svm_read_tsc_offset,
4399 .write_tsc_offset = svm_write_tsc_offset,
4400 .adjust_tsc_offset = svm_adjust_tsc_offset,
4401 .compute_tsc_offset = svm_compute_tsc_offset,
4402 .read_l1_tsc = svm_read_l1_tsc,
4403
4404 .set_tdp_cr3 = set_tdp_cr3,
4405
4406 .check_intercept = svm_check_intercept,
4407 .handle_external_intr = svm_handle_external_intr,
4408 };
4409
4410 static int __init svm_init(void)
4411 {
4412 return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
4413 __alignof__(struct vcpu_svm), THIS_MODULE);
4414 }
4415
4416 static void __exit svm_exit(void)
4417 {
4418 kvm_exit();
4419 }
4420
4421 module_init(svm_init)
4422 module_exit(svm_exit)