/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/hashtable.h>
#include <linux/frame.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_SVM),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT			(1 <<  0)
#define SVM_FEATURE_LBRV		(1 <<  1)
#define SVM_FEATURE_SVML		(1 <<  2)
#define SVM_FEATURE_NRIP		(1 <<  3)
#define SVM_FEATURE_TSC_RATE		(1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN		(1 <<  5)
#define SVM_FEATURE_FLUSH_ASID		(1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST	(1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER	(1 << 10)

#define SVM_AVIC_DOORBELL	0xc001011b

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD		0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

#define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)

/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe.  APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT	255

#define AVIC_UNACCEL_ACCESS_WRITE_MASK		1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK		0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK		0xFFFFFFFF

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS	8
#define AVIC_VCPU_ID_MASK	((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS		24
#define AVIC_VM_ID_NR		(1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK		((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)	(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
					(y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)	((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)	(x & AVIC_VCPU_ID_MASK)
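
/*
 * Example (illustrative): AVIC_GATAG(0x5, 0x3) packs vm_id 0x5 and
 * vcpu_id 0x3 into the tag 0x503; AVIC_GATAG_TO_VMID() and
 * AVIC_GATAG_TO_VCPUID() recover the two fields from that tag.
 */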

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;
};

/*
 * This is a wrapper of struct amd_ir_data.
 */
struct amd_svm_iommu_ir {
	struct list_head node;	/* Used by SVM for per-vcpu ir_list */
	void *data;		/* Storing pointer to struct amd_ir_data */
};

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID		0xffffffffU

static const struct svm_direct_access_msrs {
	u32 index;	/* Index of the MSR */
	bool always;	/* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable / disable AVIC */
static int avic;
#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
#endif

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* AVIC VM ID bit masks and lock */
static DECLARE_BITMAP(avic_vm_id_bitmap, AVIC_VM_ID_NR);
static DEFINE_SPINLOCK(avic_vm_id_lock);

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

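/*
 * VMCB clean bits let the hardware skip reloading VMCB state groups
 * that have not changed since the last VMRUN.  mark_dirty() clears a
 * clean bit to force a reload of the corresponding group.
 */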
static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

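/*
 * While a nested guest runs, the active VMCB must intercept everything
 * that either the host state (L1's hsave) or the nested guest asked
 * for, so the cached host and guest masks are OR'ed together here.
 */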
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
		| (1 << INTERCEPT_DR1_READ)
		| (1 << INTERCEPT_DR2_READ)
		| (1 << INTERCEPT_DR3_READ)
		| (1 << INTERCEPT_DR4_READ)
		| (1 << INTERCEPT_DR5_READ)
		| (1 << INTERCEPT_DR6_READ)
		| (1 << INTERCEPT_DR7_READ)
		| (1 << INTERCEPT_DR0_WRITE)
		| (1 << INTERCEPT_DR1_WRITE)
		| (1 << INTERCEPT_DR2_WRITE)
		| (1 << INTERCEPT_DR3_WRITE)
		| (1 << INTERCEPT_DR4_WRITE)
		| (1 << INTERCEPT_DR5_WRITE)
		| (1 << INTERCEPT_DR6_WRITE)
		| (1 << INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

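/*
 * The MSR permission map uses two bits per MSR (one read, one write),
 * i.e. four MSRs per byte, in three 2K regions covering the MSR ranges
 * starting at 0, 0xc0000000 and 0xc0010000.  Return the u32 index into
 * the map for @msr, or MSR_INVALID if it falls outside all ranges.
 */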
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

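/*
 * CLGI and STGI clear and set the Global Interrupt Flag.  With GIF
 * clear, physical interrupts are held off, which protects the world
 * switch around VMRUN from being interrupted on the host side.
 */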
static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

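/* Nested page tables use 4 levels on 64-bit hosts, 3-level PAE otherwise. */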
static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

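/*
 * Advance RIP past the instruction that caused the current exit.  With
 * the Next-RIP Save feature the hardware supplies next_rip directly;
 * otherwise fall back to the instruction emulator to skip it.
 */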
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	bool reinject = vcpu->arch.exception.reinject;
	u32 error_code = vcpu->arch.exception.error_code;

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

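/*
 * Enable SVM on this CPU: set EFER.SVME and point MSR_VM_HSAVE_PA at
 * the per-CPU host save area, then read the OSVW (OS Visible
 * Workaround) MSRs so guests can later be told about host errata.
 */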
static int svm_hardware_enable(void)
{
	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!sd->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;
}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

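/*
 * Set or clear the read/write intercept bits for @msr in the
 * permission map.  Worked example (illustrative): MSR_STAR
 * (0xc0000081) maps to u32 offset 520, with bit 2 controlling reads
 * and bit 3 controlling writes.
 */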
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers, extend the direct_access_msrs list at
	 * the beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	BUG_ON(offset == MSR_INVALID);

	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers, the msrpm_offsets table has overflowed.
	 * Just increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;
	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}

/* Note:
 * This hash table is used to map VM_ID to a struct kvm_arch,
 * when handling AMD IOMMU GALOG notification to schedule in
 * a particular vCPU.
 */
#define SVM_VM_DATA_HASH_BITS	8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);

/* Note:
 * This function is called from the IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
static int avic_ga_log_notifier(u32 ga_tag)
{
	unsigned long flags;
	struct kvm_arch *ka = NULL;
	struct kvm_vcpu *vcpu = NULL;
	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
		struct kvm *kvm = container_of(ka, struct kvm, arch);
		struct kvm_arch *vm_data = &kvm->arch;

		if (vm_data->avic_vm_id != vm_id)
			continue;
		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		break;
	}
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	if (!vcpu)
		return 0;

	/* Note:
	 * At this point, the IOMMU should have already set the pending
	 * bit in the vAPIC backing page. So, we just need to schedule
	 * in the vcpu.
	 */
	if (vcpu->mode == OUTSIDE_GUEST_MODE)
		kvm_vcpu_wake_up(vcpu);

	return 0;
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	if (avic) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_AVIC) ||
		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
			avic = false;
		} else {
			pr_info("AVIC enabled\n");

			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
		}
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	} else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   svm->vmcb->control.tsc_offset,
					   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void avic_init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
	phys_addr_t bpa = page_to_phys(svm->avic_backing_page);
	phys_addr_t lpa = page_to_phys(vm_data->avic_logical_id_table_page);
	phys_addr_t ppa = page_to_phys(vm_data->avic_physical_id_table_page);

	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
	svm->vcpu.arch.apicv_active = true;
}

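/*
 * Set up the initial VMCB: register the intercepts KVM wants to
 * emulate, segment and control state matching the x86 reset values,
 * and the optional NPT, pause-filter, AVIC and virtual VMLOAD/VMSAVE
 * features when they are available.
 */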
static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	if (!kvm_vcpu_apicv_active(&svm->vcpu))
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_HLT);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_XSETBV);

	if (!kvm_mwait_in_guest()) {
		set_intercept(svm, INTERCEPT_MONITOR);
		set_intercept(svm, INTERCEPT_MWAIT);
	}

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
	kvm_mmu_reset_context(&svm->vcpu);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = svm->vcpu.arch.pat;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		control->pause_filter_count = 3000;
		set_intercept(svm, INTERCEPT_PAUSE);
	}

	if (avic)
		avic_init_vmcb(svm);

	/*
	 * If hardware supports Virtual VMLOAD VMSAVE then enable it
	 * in VMCB and clear intercepts to avoid #VMEXIT.
	 */
	if (vls) {
		clr_intercept(svm, INTERCEPT_VMLOAD);
		clr_intercept(svm, INTERCEPT_VMSAVE);
		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
	}

	mark_all_dirty(svm->vmcb);

	enable_gif(svm);
}

static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
				       unsigned int index)
{
	u64 *avic_physical_id_table;
	struct kvm_arch *vm_data = &vcpu->kvm->arch;

	if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
		return NULL;

	avic_physical_id_table = page_address(vm_data->avic_physical_id_table_page);

	return &avic_physical_id_table[index];
}

/*
 * Note:
 * AVIC hardware walks the nested page table to check permissions,
 * but does not use the SPA address specified in the leaf page
 * table entry, since it uses the address in the AVIC_BACKING_PAGE
 * pointer field of the VMCB.  Therefore, we set up the
 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
 */
static int avic_init_access_page(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;

	if (kvm->arch.apic_access_page_done)
		return 0;

	ret = x86_set_memory_region(kvm,
				    APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				    APIC_DEFAULT_PHYS_BASE,
				    PAGE_SIZE);
	if (ret)
		return ret;

	kvm->arch.apic_access_page_done = true;
	return 0;
}

static int avic_init_backing_page(struct kvm_vcpu *vcpu)
{
	int ret;
	u64 *entry, new_entry;
	int id = vcpu->vcpu_id;
	struct vcpu_svm *svm = to_svm(vcpu);

	ret = avic_init_access_page(vcpu);
	if (ret)
		return ret;

	if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
		return -EINVAL;

	if (!svm->vcpu.arch.apic->regs)
		return -EINVAL;

	svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);

	/* Setting AVIC backing page address in the phy APIC ID table */
	entry = avic_get_physical_id_entry(vcpu, id);
	if (!entry)
		return -EINVAL;

	new_entry = READ_ONCE(*entry);
	new_entry = (page_to_phys(svm->avic_backing_page) &
		     AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
		     AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
	WRITE_ONCE(*entry, new_entry);

	svm->avic_physical_id_cache = entry;

	return 0;
}

static inline int avic_get_next_vm_id(void)
{
	int id;

	spin_lock(&avic_vm_id_lock);

	/* AVIC VM ID is one-based. */
	id = find_next_zero_bit(avic_vm_id_bitmap, AVIC_VM_ID_NR, 1);
	if (id <= AVIC_VM_ID_MASK)
		__set_bit(id, avic_vm_id_bitmap);
	else
		id = -EAGAIN;

	spin_unlock(&avic_vm_id_lock);
	return id;
}

static inline int avic_free_vm_id(int id)
{
	if (id <= 0 || id > AVIC_VM_ID_MASK)
		return -EINVAL;

	spin_lock(&avic_vm_id_lock);
	__clear_bit(id, avic_vm_id_bitmap);
	spin_unlock(&avic_vm_id_lock);
	return 0;
}

static void avic_vm_destroy(struct kvm *kvm)
{
	unsigned long flags;
	struct kvm_arch *vm_data = &kvm->arch;

	if (!avic)
		return;

	avic_free_vm_id(vm_data->avic_vm_id);

	if (vm_data->avic_logical_id_table_page)
		__free_page(vm_data->avic_logical_id_table_page);
	if (vm_data->avic_physical_id_table_page)
		__free_page(vm_data->avic_physical_id_table_page);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_del(&vm_data->hnode);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
}

static int avic_vm_init(struct kvm *kvm)
{
	unsigned long flags;
	int vm_id, err = -ENOMEM;
	struct kvm_arch *vm_data = &kvm->arch;
	struct page *p_page;
	struct page *l_page;

	if (!avic)
		return 0;

	vm_id = avic_get_next_vm_id();
	if (vm_id < 0)
		return vm_id;
	vm_data->avic_vm_id = (u32)vm_id;

	/* Allocating physical APIC ID table (4KB) */
	p_page = alloc_page(GFP_KERNEL);
	if (!p_page)
		goto free_avic;

	vm_data->avic_physical_id_table_page = p_page;
	clear_page(page_address(p_page));

	/* Allocating logical APIC ID table (4KB) */
	l_page = alloc_page(GFP_KERNEL);
	if (!l_page)
		goto free_avic;

	vm_data->avic_logical_id_table_page = l_page;
	clear_page(page_address(l_page));

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	return 0;

free_avic:
	avic_vm_destroy(kvm);
	return err;
}

static inline int
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
	int ret = 0;
	unsigned long flags;
	struct amd_svm_iommu_ir *ir;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm))
		return 0;

	/*
	 * Here, we go through the per-vcpu ir_list to update all existing
	 * interrupt remapping table entries targeting this vcpu.
	 */
	spin_lock_irqsave(&svm->ir_list_lock, flags);

	if (list_empty(&svm->ir_list))
		goto out;

	list_for_each_entry(ir, &svm->ir_list, node) {
		ret = amd_iommu_update_ga(cpu, r, ir->data);
		if (ret)
			break;
	}
out:
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
	return ret;
}

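/*
 * Publish the host physical APIC ID and the is-running state in this
 * vCPU's physical APIC ID table entry, so that AVIC hardware (and the
 * IOMMU) can deliver doorbells directly while the vCPU is running.
 */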
static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	u64 entry;
	/* ID = 0xff (broadcast), ID > 0xff (reserved) */
	int h_physical_id = kvm_cpu_get_apicid(cpu);
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_vcpu_apicv_active(vcpu))
		return;

	if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
		return;

	entry = READ_ONCE(*(svm->avic_physical_id_cache));
	WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);

	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);

	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
	if (svm->avic_is_running)
		entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;

	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
					svm->avic_is_running);
}

static void avic_vcpu_put(struct kvm_vcpu *vcpu)
{
	u64 entry;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_vcpu_apicv_active(vcpu))
		return;

	entry = READ_ONCE(*(svm->avic_physical_id_cache));
	if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
		avic_update_iommu_vcpu_affinity(vcpu, -1, 0);

	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
}

/*
 * This function is called during VCPU halt/unhalt.
 */
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->avic_is_running = is_run;
	if (is_run)
		avic_vcpu_load(vcpu, vcpu->cpu);
	else
		avic_vcpu_put(vcpu);
}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 dummy;
	u32 eax = 1;

	if (!init_event) {
		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
					   MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
			svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
	}
	init_vmcb(svm);

	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
	kvm_register_write(vcpu, VCPU_REGS_RDX, eax);

	if (kvm_vcpu_apicv_active(vcpu) && !init_event)
		avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto uninit;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto free_page1;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto free_page2;

	hsave_page = alloc_page(GFP_KERNEL);
	if (!hsave_page)
		goto free_page3;

	if (avic) {
		err = avic_init_backing_page(&svm->vcpu);
		if (err)
			goto free_page4;

		INIT_LIST_HEAD(&svm->ir_list);
		spin_lock_init(&svm->ir_list_lock);
	}

	/*
	 * We initialize this flag to true to make sure that the is_running
	 * bit will be set the first time the vcpu is loaded.
	 */
	svm->avic_is_running = true;

	svm->nested.hsave = page_address(hsave_page);

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->nested.msrpm = page_address(nested_msrpm_pages);
	svm_vcpu_init_msrpm(svm->nested.msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	init_vmcb(svm);

	svm_init_osvw(&svm->vcpu);

	return &svm->vcpu;

free_page4:
	__free_page(hsave_page);
free_page3:
	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
	__free_page(page);
uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->nested.hsave));
	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		svm->asid_generation = 0;
		mark_all_dirty(svm->vmcb);
	}

#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
	savesegment(fs, svm->host.fs);
	savesegment(gs, svm->host.gs);
	svm->host.ldt = kvm_read_ldt();

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
		if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
			__this_cpu_write(current_tsc_ratio, tsc_ratio);
			wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
		}
	}
	/* This assumes that the kernel never uses MSR_TSC_AUX */
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);

	avic_vcpu_load(vcpu, cpu);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	avic_vcpu_put(vcpu);

	++vcpu->stat.host_state_reload;
	kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
	loadsegment(fs, svm->host.fs);
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
	load_gs_index(svm->host.gs);
#else
#ifdef CONFIG_X86_32_LAZY_GS
	loadsegment(gs, svm->host.gs);
#endif
#endif
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	avic_set_running(vcpu, false);
}

static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	avic_set_running(vcpu, true);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long rflags = svm->vmcb->save.rflags;

	if (svm->nmi_singlestep) {
		/* Hide our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			rflags &= ~X86_EFLAGS_RF;
	}
	return rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (to_svm(vcpu)->nmi_singlestep)
		rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);

	/*
	 * Any change of EFLAGS.VM is accompanied by a reload of SS
	 * (caused by either a task switch or an inter-privilege IRET),
	 * so we do not need to update the CPL here.
	 */
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		BUG_ON(!npt_enabled);
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
		break;
	default:
		BUG();
	}
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	set_intercept(svm, INTERCEPT_VINTR);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	clr_intercept(svm, INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;

	/*
	 * AMD CPUs circa 2014 track the G bit for all segments except CS.
	 * However, the SVM spec states that the G bit is not observed by the
	 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
	 * So let's synthesize a legal G bit for all segments, this helps
	 * running KVM nested. It also helps cross-vendor migration, because
	 * Intel's vmentry has a check on the 'G' bit.
	 */
	var->g = s->limit > 0xfffff;

	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross vendor migration purposes by "not present"
	 */
	var->unusable = !var->present;

	switch (seg) {
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		/* This is symmetric with svm_set_segment() */
		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
		break;
	}
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr3(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

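/*
 * Copy the guest-owned bits of CR0 (SVM_CR0_SELECTIVE_MASK) into the
 * host-visible CR0.  When the guest and host views of CR0 agree, the
 * full CR0 read/write intercepts can be dropped and the cheaper
 * selective-CR0-write intercept suffices.
 */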
static void update_cr0_intercept(struct vcpu_svm *svm)
{
	ulong gcr0 = svm->vcpu.arch.cr0;
	u64 *hcr0 = &svm->vmcb->save.cr0;

	*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
		| (gcr0 & SVM_CR0_SELECTIVE_MASK);

	mark_dirty(svm->vmcb, VMCB_CR);

	if (gcr0 == *hcr0) {
		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	} else {
		set_cr_intercept(svm, INTERCEPT_CR0_READ);
		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	}
}

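/*
 * Mirror the guest's CR0 into the VMCB, tracking long-mode activation:
 * EFER.LMA is set or cleared when the guest toggles CR0.PG with
 * EFER.LME enabled.  Without NPT, PG and WP stay forced on in the
 * host-visible copy.
 */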
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	vcpu->arch.cr0 = cr0;

	if (!npt_enabled)
		cr0 |= X86_CR0_PG | X86_CR0_WP;

	/*
	 * Re-enable caching here because the QEMU BIOS does not do it;
	 * leaving CD/NW set would noticeably delay reboot.
	 */
	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1997 svm->vmcb->save.cr0 = cr0;
1998 mark_dirty(svm->vmcb, VMCB_CR);
1999 update_cr0_intercept(svm);
2000 }
2001
2002 static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
2003 {
2004 unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
2005 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
2006
2007 if (cr4 & X86_CR4_VMXE)
2008 return 1;
2009
2010 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
2011 svm_flush_tlb(vcpu);
2012
2013 vcpu->arch.cr4 = cr4;
2014 if (!npt_enabled)
2015 cr4 |= X86_CR4_PAE;
2016 cr4 |= host_cr4_mce;
2017 to_svm(vcpu)->vmcb->save.cr4 = cr4;
2018 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
2019 return 0;
2020 }
2021
2022 static void svm_set_segment(struct kvm_vcpu *vcpu,
2023 struct kvm_segment *var, int seg)
2024 {
2025 struct vcpu_svm *svm = to_svm(vcpu);
2026 struct vmcb_seg *s = svm_seg(vcpu, seg);
2027
2028 s->base = var->base;
2029 s->limit = var->limit;
2030 s->selector = var->selector;
2031 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
2032 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
2033 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
2034 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
2035 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
2036 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
2037 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
2038 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
2039
2040 /*
2041 * This is always accurate, except if SYSRET returned to a segment
2042 * with SS.DPL != 3. Intel does not have this quirk, and always
2043 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
2044 * would entail passing the CPL to userspace and back.
2045 */
2046 if (seg == VCPU_SREG_SS)
2047 /* This is symmetric with svm_get_segment() */
2048 svm->vmcb->save.cpl = (var->dpl & 3);
2049
2050 mark_dirty(svm->vmcb, VMCB_SEG);
2051 }
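
/*
 * Illustrative example of the attrib packing above (shift values per
 * SVM_SELECTOR_* in asm/svm.h): a flat 64-bit code segment with
 * type 0xb, s == 1, dpl == 0, present == 1, l == 1, g == 1 packs to
 * 0xb | (1 << 4) | (1 << 7) | (1 << 9) | (1 << 11) == 0xa9b, i.e. the
 * VMCB keeps the descriptor's access byte plus flags in 12 bits.
 */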
2052
2053 static void update_bp_intercept(struct kvm_vcpu *vcpu)
2054 {
2055 struct vcpu_svm *svm = to_svm(vcpu);
2056
2057 clr_exception_intercept(svm, BP_VECTOR);
2058
2059 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
2060 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2061 set_exception_intercept(svm, BP_VECTOR);
2062 } else
2063 vcpu->guest_debug = 0;
2064 }
2065
2066 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
2067 {
2068 if (sd->next_asid > sd->max_asid) {
2069 ++sd->asid_generation;
2070 sd->next_asid = 1;
2071 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
2072 }
2073
2074 svm->asid_generation = sd->asid_generation;
2075 svm->vmcb->control.asid = sd->next_asid++;
2076
2077 mark_dirty(svm->vmcb, VMCB_ASID);
2078 }
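
/*
 * Note on the scheme above: ASIDs are handed out sequentially per
 * physical CPU. Once the pool is exhausted, the generation counter is
 * bumped and TLB_CONTROL_FLUSH_ALL_ASID is requested, so stale
 * translations tagged with a recycled ASID cannot survive the wrap.
 */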
2079
2080 static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
2081 {
2082 return to_svm(vcpu)->vmcb->save.dr6;
2083 }
2084
2085 static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
2086 {
2087 struct vcpu_svm *svm = to_svm(vcpu);
2088
2089 svm->vmcb->save.dr6 = value;
2090 mark_dirty(svm->vmcb, VMCB_DR);
2091 }
2092
2093 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
2094 {
2095 struct vcpu_svm *svm = to_svm(vcpu);
2096
2097 get_debugreg(vcpu->arch.db[0], 0);
2098 get_debugreg(vcpu->arch.db[1], 1);
2099 get_debugreg(vcpu->arch.db[2], 2);
2100 get_debugreg(vcpu->arch.db[3], 3);
2101 vcpu->arch.dr6 = svm_get_dr6(vcpu);
2102 vcpu->arch.dr7 = svm->vmcb->save.dr7;
2103
2104 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
2105 set_dr_intercepts(svm);
2106 }
2107
2108 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
2109 {
2110 struct vcpu_svm *svm = to_svm(vcpu);
2111
2112 svm->vmcb->save.dr7 = value;
2113 mark_dirty(svm->vmcb, VMCB_DR);
2114 }
2115
2116 static int pf_interception(struct vcpu_svm *svm)
2117 {
2118 u64 fault_address = svm->vmcb->control.exit_info_2;
2119 u64 error_code = svm->vmcb->control.exit_info_1;
2120
2121 return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
2122 svm->vmcb->control.insn_bytes,
2123 svm->vmcb->control.insn_len, !npt_enabled);
2124 }
2125
2126 static int db_interception(struct vcpu_svm *svm)
2127 {
2128 struct kvm_run *kvm_run = svm->vcpu.run;
2129
2130 if (!(svm->vcpu.guest_debug &
2131 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
2132 !svm->nmi_singlestep) {
2133 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
2134 return 1;
2135 }
2136
2137 if (svm->nmi_singlestep) {
2138 disable_nmi_singlestep(svm);
2139 }
2140
2141 if (svm->vcpu.guest_debug &
2142 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
2143 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2144 kvm_run->debug.arch.pc =
2145 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2146 kvm_run->debug.arch.exception = DB_VECTOR;
2147 return 0;
2148 }
2149
2150 return 1;
2151 }
2152
2153 static int bp_interception(struct vcpu_svm *svm)
2154 {
2155 struct kvm_run *kvm_run = svm->vcpu.run;
2156
2157 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2158 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2159 kvm_run->debug.arch.exception = BP_VECTOR;
2160 return 0;
2161 }
2162
2163 static int ud_interception(struct vcpu_svm *svm)
2164 {
2165 int er;
2166
2167 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
2168 if (er != EMULATE_DONE)
2169 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2170 return 1;
2171 }
2172
2173 static int ac_interception(struct vcpu_svm *svm)
2174 {
2175 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
2176 return 1;
2177 }
2178
2179 static bool is_erratum_383(void)
2180 {
2181 int err, i;
2182 u64 value;
2183
2184 if (!erratum_383_found)
2185 return false;
2186
2187 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2188 if (err)
2189 return false;
2190
2191 /* Bit 62 may or may not be set for this mce */
2192 value &= ~(1ULL << 62);
2193
2194 if (value != 0xb600000000010015ULL)
2195 return false;
2196
2197 /* Clear MCi_STATUS registers */
2198 for (i = 0; i < 6; ++i)
2199 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2200
2201 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2202 if (!err) {
2203 u32 low, high;
2204
2205 value &= ~(1ULL << 2);
2206 low = lower_32_bits(value);
2207 high = upper_32_bits(value);
2208
2209 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2210 }
2211
2212 /* Flush tlb to evict multi-match entries */
2213 __flush_tlb_all();
2214
2215 return true;
2216 }
2217
2218 static void svm_handle_mce(struct vcpu_svm *svm)
2219 {
2220 if (is_erratum_383()) {
2221 /*
2222 * Erratum 383 triggered. Guest state is corrupt so kill the
2223 * guest.
2224 */
2225 pr_err("KVM: Guest triggered AMD Erratum 383\n");
2226
2227 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
2228
2229 return;
2230 }
2231
2232 /*
2233 * On an #MC intercept the MCE handler is not called automatically in
2234 * the host. So do it by hand here.
2235 */
2236 asm volatile (
2237 "int $0x12\n");
2238 /* not sure if we ever come back to this point */
2239
2240 return;
2241 }
2242
2243 static int mc_interception(struct vcpu_svm *svm)
2244 {
2245 return 1;
2246 }
2247
2248 static int shutdown_interception(struct vcpu_svm *svm)
2249 {
2250 struct kvm_run *kvm_run = svm->vcpu.run;
2251
2252 /*
2253 * VMCB is undefined after a SHUTDOWN intercept
2254 * so reinitialize it.
2255 */
2256 clear_page(svm->vmcb);
2257 init_vmcb(svm);
2258
2259 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2260 return 0;
2261 }
2262
2263 static int io_interception(struct vcpu_svm *svm)
2264 {
2265 struct kvm_vcpu *vcpu = &svm->vcpu;
2266 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2267 int size, in, string, ret;
2268 unsigned port;
2269
2270 ++svm->vcpu.stat.io_exits;
2271 string = (io_info & SVM_IOIO_STR_MASK) != 0;
2272 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2273 if (string)
2274 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
2275
2276 port = io_info >> 16;
2277 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2278 svm->next_rip = svm->vmcb->control.exit_info_2;
2279 ret = kvm_skip_emulated_instruction(&svm->vcpu);
2280
2281 /*
2282 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
2283 * KVM_EXIT_DEBUG here.
2284 */
2285 if (in)
2286 return kvm_fast_pio_in(vcpu, size, port) && ret;
2287 else
2288 return kvm_fast_pio_out(vcpu, size, port) && ret;
2289 }
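
/*
 * Rough sketch of the EXITINFO1 decode above (per the IOIO layout in
 * the AMD APM): bit 0 is the direction (1 == IN), bit 2 flags string
 * instructions, bits 6:4 hold the one-hot operand size and bits 31:16
 * hold the port number. E.g. an "inb" from port 0x71 yields in == 1,
 * string == 0, size == 1 and port == 0x71.
 */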
2290
2291 static int nmi_interception(struct vcpu_svm *svm)
2292 {
2293 return 1;
2294 }
2295
2296 static int intr_interception(struct vcpu_svm *svm)
2297 {
2298 ++svm->vcpu.stat.irq_exits;
2299 return 1;
2300 }
2301
2302 static int nop_on_interception(struct vcpu_svm *svm)
2303 {
2304 return 1;
2305 }
2306
2307 static int halt_interception(struct vcpu_svm *svm)
2308 {
2309 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
2310 return kvm_emulate_halt(&svm->vcpu);
2311 }
2312
2313 static int vmmcall_interception(struct vcpu_svm *svm)
2314 {
2315 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2316 return kvm_emulate_hypercall(&svm->vcpu);
2317 }
2318
2319 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
2320 {
2321 struct vcpu_svm *svm = to_svm(vcpu);
2322
2323 return svm->nested.nested_cr3;
2324 }
2325
2326 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
2327 {
2328 struct vcpu_svm *svm = to_svm(vcpu);
2329 u64 cr3 = svm->nested.nested_cr3;
2330 u64 pdpte;
2331 int ret;
2332
2333 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
2334 offset_in_page(cr3) + index * 8, 8);
2335 if (ret)
2336 return 0;
2337 return pdpte;
2338 }
2339
2340 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
2341 unsigned long root)
2342 {
2343 struct vcpu_svm *svm = to_svm(vcpu);
2344
2345 svm->vmcb->control.nested_cr3 = root;
2346 mark_dirty(svm->vmcb, VMCB_NPT);
2347 svm_flush_tlb(vcpu);
2348 }
2349
2350 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
2351 struct x86_exception *fault)
2352 {
2353 struct vcpu_svm *svm = to_svm(vcpu);
2354
2355 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
2356 /*
2357 * TODO: track the cause of the nested page fault, and
2358 * correctly fill in the high bits of exit_info_1.
2359 */
2360 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
2361 svm->vmcb->control.exit_code_hi = 0;
2362 svm->vmcb->control.exit_info_1 = (1ULL << 32);
2363 svm->vmcb->control.exit_info_2 = fault->address;
2364 }
2365
2366 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
2367 svm->vmcb->control.exit_info_1 |= fault->error_code;
2368
2369 /*
2370 * The present bit is always zero for page structure faults on real
2371 * hardware.
2372 */
2373 if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
2374 svm->vmcb->control.exit_info_1 &= ~1;
2375
2376 nested_svm_vmexit(svm);
2377 }
2378
2379 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
2380 {
2381 WARN_ON(mmu_is_nested(vcpu));
2382 kvm_init_shadow_mmu(vcpu);
2383 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
2384 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
2385 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
2386 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
2387 vcpu->arch.mmu.shadow_root_level = get_npt_level();
2388 reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
2389 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
2390 }
2391
2392 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
2393 {
2394 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
2395 }
2396
2397 static int nested_svm_check_permissions(struct vcpu_svm *svm)
2398 {
2399 if (!(svm->vcpu.arch.efer & EFER_SVME) ||
2400 !is_paging(&svm->vcpu)) {
2401 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2402 return 1;
2403 }
2404
2405 if (svm->vmcb->save.cpl) {
2406 kvm_inject_gp(&svm->vcpu, 0);
2407 return 1;
2408 }
2409
2410 return 0;
2411 }
2412
2413 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
2414 bool has_error_code, u32 error_code)
2415 {
2416 int vmexit;
2417
2418 if (!is_guest_mode(&svm->vcpu))
2419 return 0;
2420
2421 vmexit = nested_svm_intercept(svm);
2422 if (vmexit != NESTED_EXIT_DONE)
2423 return 0;
2424
2425 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
2426 svm->vmcb->control.exit_code_hi = 0;
2427 svm->vmcb->control.exit_info_1 = error_code;
2428
2429 /*
2430 * FIXME: we should not write CR2 when L1 intercepts an L2 #PF exception.
2431 * The fix is to add the ancillary datum (CR2 or DR6) to structs
2432 * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 can be
2433 	 * written only when inject_pending_event runs (DR6 would be written here
2434 * too). This should be conditional on a new capability---if the
2435 * capability is disabled, kvm_multiple_exception would write the
2436 * ancillary information to CR2 or DR6, for backwards ABI-compatibility.
2437 */
2438 if (svm->vcpu.arch.exception.nested_apf)
2439 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
2440 else
2441 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
2442
2443 svm->nested.exit_required = true;
2444 return vmexit;
2445 }
2446
2447 /* This function returns true if it is safe to enable the irq window */
2448 static inline bool nested_svm_intr(struct vcpu_svm *svm)
2449 {
2450 if (!is_guest_mode(&svm->vcpu))
2451 return true;
2452
2453 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2454 return true;
2455
2456 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
2457 return false;
2458
2459 /*
2460 * if vmexit was already requested (by intercepted exception
2461 	 * for instance), do not overwrite it with "external interrupt"
2462 * vmexit.
2463 */
2464 if (svm->nested.exit_required)
2465 return false;
2466
2467 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
2468 svm->vmcb->control.exit_info_1 = 0;
2469 svm->vmcb->control.exit_info_2 = 0;
2470
2471 if (svm->nested.intercept & 1ULL) {
2472 /*
2473 * The #vmexit can't be emulated here directly because this
2474 * code path runs with irqs and preemption disabled. A
2475 * #vmexit emulation might sleep. Only signal request for
2476 * the #vmexit here.
2477 */
2478 svm->nested.exit_required = true;
2479 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
2480 return false;
2481 }
2482
2483 return true;
2484 }
2485
2486 /* This function returns true if it is safe to enable the nmi window */
2487 static inline bool nested_svm_nmi(struct vcpu_svm *svm)
2488 {
2489 if (!is_guest_mode(&svm->vcpu))
2490 return true;
2491
2492 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
2493 return true;
2494
2495 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
2496 svm->nested.exit_required = true;
2497
2498 return false;
2499 }
2500
2501 static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
2502 {
2503 struct page *page;
2504
2505 might_sleep();
2506
2507 page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
2508 if (is_error_page(page))
2509 goto error;
2510
2511 *_page = page;
2512
2513 return kmap(page);
2514
2515 error:
2516 kvm_inject_gp(&svm->vcpu, 0);
2517
2518 return NULL;
2519 }
2520
2521 static void nested_svm_unmap(struct page *page)
2522 {
2523 kunmap(page);
2524 kvm_release_page_dirty(page);
2525 }
2526
2527 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
2528 {
2529 unsigned port, size, iopm_len;
2530 u16 val, mask;
2531 u8 start_bit;
2532 u64 gpa;
2533
2534 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
2535 return NESTED_EXIT_HOST;
2536
2537 port = svm->vmcb->control.exit_info_1 >> 16;
2538 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
2539 SVM_IOIO_SIZE_SHIFT;
2540 gpa = svm->nested.vmcb_iopm + (port / 8);
2541 start_bit = port % 8;
2542 iopm_len = (start_bit + size > 8) ? 2 : 1;
2543 mask = (0xf >> (4 - size)) << start_bit;
2544 val = 0;
2545
2546 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
2547 return NESTED_EXIT_DONE;
2548
2549 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
2550 }
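
/*
 * Worked example for the lookup above: for a 2-byte access to port
 * 0x3f8, gpa points at byte 0x7f of L1's IOPM (0x3f8 / 8), start_bit
 * is 0 (0x3f8 % 8), iopm_len stays 1 and mask == 0x3, so the access
 * is reflected to L1 iff it intercepts port 0x3f8 or port 0x3f9.
 */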
2551
2552 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
2553 {
2554 u32 offset, msr, value;
2555 int write, mask;
2556
2557 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2558 return NESTED_EXIT_HOST;
2559
2560 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2561 offset = svm_msrpm_offset(msr);
2562 write = svm->vmcb->control.exit_info_1 & 1;
2563 mask = 1 << ((2 * (msr & 0xf)) + write);
2564
2565 if (offset == MSR_INVALID)
2566 return NESTED_EXIT_DONE;
2567
2568 	/* Offset is in 32 bit units but we need it in 8 bit units */
2569 offset *= 4;
2570
2571 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
2572 return NESTED_EXIT_DONE;
2573
2574 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
2575 }
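
/*
 * Layout reminder for the math above: each MSR owns two adjacent bits
 * in the permission map (read intercept, then write intercept), so a
 * u32 covers 16 MSRs and a WRMSR to an MSR with (msr & 0xf) == 5
 * tests bit 2 * 5 + 1 == 11 of the u32 at the computed offset.
 */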
2576
2577 /* DB exceptions for our internal use must not cause vmexit */
2578 static int nested_svm_intercept_db(struct vcpu_svm *svm)
2579 {
2580 unsigned long dr6;
2581
2582 /* if we're not singlestepping, it's not ours */
2583 if (!svm->nmi_singlestep)
2584 return NESTED_EXIT_DONE;
2585
2586 /* if it's not a singlestep exception, it's not ours */
2587 if (kvm_get_dr(&svm->vcpu, 6, &dr6))
2588 return NESTED_EXIT_DONE;
2589 if (!(dr6 & DR6_BS))
2590 return NESTED_EXIT_DONE;
2591
2592 /* if the guest is singlestepping, it should get the vmexit */
2593 if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
2594 disable_nmi_singlestep(svm);
2595 return NESTED_EXIT_DONE;
2596 }
2597
2598 /* it's ours, the nested hypervisor must not see this one */
2599 return NESTED_EXIT_HOST;
2600 }
2601
2602 static int nested_svm_exit_special(struct vcpu_svm *svm)
2603 {
2604 u32 exit_code = svm->vmcb->control.exit_code;
2605
2606 switch (exit_code) {
2607 case SVM_EXIT_INTR:
2608 case SVM_EXIT_NMI:
2609 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
2610 return NESTED_EXIT_HOST;
2611 case SVM_EXIT_NPF:
2612 /* For now we are always handling NPFs when using them */
2613 if (npt_enabled)
2614 return NESTED_EXIT_HOST;
2615 break;
2616 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
2617 /* When we're shadowing, trap PFs, but not async PF */
2618 if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
2619 return NESTED_EXIT_HOST;
2620 break;
2621 default:
2622 break;
2623 }
2624
2625 return NESTED_EXIT_CONTINUE;
2626 }
2627
2628 /*
2629  * If this function returns NESTED_EXIT_DONE, this #vmexit must be
2629  * reflected to the nested hypervisor (L1).
2630 */
2631 static int nested_svm_intercept(struct vcpu_svm *svm)
2632 {
2633 u32 exit_code = svm->vmcb->control.exit_code;
2634 int vmexit = NESTED_EXIT_HOST;
2635
2636 switch (exit_code) {
2637 case SVM_EXIT_MSR:
2638 vmexit = nested_svm_exit_handled_msr(svm);
2639 break;
2640 case SVM_EXIT_IOIO:
2641 vmexit = nested_svm_intercept_ioio(svm);
2642 break;
2643 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
2644 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
2645 if (svm->nested.intercept_cr & bit)
2646 vmexit = NESTED_EXIT_DONE;
2647 break;
2648 }
2649 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
2650 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
2651 if (svm->nested.intercept_dr & bit)
2652 vmexit = NESTED_EXIT_DONE;
2653 break;
2654 }
2655 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
2656 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
2657 if (svm->nested.intercept_exceptions & excp_bits) {
2658 if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
2659 vmexit = nested_svm_intercept_db(svm);
2660 else
2661 vmexit = NESTED_EXIT_DONE;
2662 }
2663 		/* an async page fault always causes a vmexit */
2664 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
2665 svm->vcpu.arch.exception.nested_apf != 0)
2666 vmexit = NESTED_EXIT_DONE;
2667 break;
2668 }
2669 case SVM_EXIT_ERR: {
2670 vmexit = NESTED_EXIT_DONE;
2671 break;
2672 }
2673 default: {
2674 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
2675 if (svm->nested.intercept & exit_bits)
2676 vmexit = NESTED_EXIT_DONE;
2677 }
2678 }
2679
2680 return vmexit;
2681 }
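
/*
 * The default case above relies on the exit code layout: exit codes
 * from SVM_EXIT_INTR upwards map 1:1 onto bits of the 64-bit
 * intercept vector, so e.g. an SVM_EXIT_VMRUN is reflected to L1 iff
 * L1 set the INTERCEPT_VMRUN bit when it issued VMRUN.
 */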
2682
2683 static int nested_svm_exit_handled(struct vcpu_svm *svm)
2684 {
2685 int vmexit;
2686
2687 vmexit = nested_svm_intercept(svm);
2688
2689 if (vmexit == NESTED_EXIT_DONE)
2690 nested_svm_vmexit(svm);
2691
2692 return vmexit;
2693 }
2694
2695 static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
2696 {
2697 struct vmcb_control_area *dst = &dst_vmcb->control;
2698 struct vmcb_control_area *from = &from_vmcb->control;
2699
2700 dst->intercept_cr = from->intercept_cr;
2701 dst->intercept_dr = from->intercept_dr;
2702 dst->intercept_exceptions = from->intercept_exceptions;
2703 dst->intercept = from->intercept;
2704 dst->iopm_base_pa = from->iopm_base_pa;
2705 dst->msrpm_base_pa = from->msrpm_base_pa;
2706 dst->tsc_offset = from->tsc_offset;
2707 dst->asid = from->asid;
2708 dst->tlb_ctl = from->tlb_ctl;
2709 dst->int_ctl = from->int_ctl;
2710 dst->int_vector = from->int_vector;
2711 dst->int_state = from->int_state;
2712 dst->exit_code = from->exit_code;
2713 dst->exit_code_hi = from->exit_code_hi;
2714 dst->exit_info_1 = from->exit_info_1;
2715 dst->exit_info_2 = from->exit_info_2;
2716 dst->exit_int_info = from->exit_int_info;
2717 dst->exit_int_info_err = from->exit_int_info_err;
2718 dst->nested_ctl = from->nested_ctl;
2719 dst->event_inj = from->event_inj;
2720 dst->event_inj_err = from->event_inj_err;
2721 dst->nested_cr3 = from->nested_cr3;
2722 dst->virt_ext = from->virt_ext;
2723 }
2724
2725 static int nested_svm_vmexit(struct vcpu_svm *svm)
2726 {
2727 struct vmcb *nested_vmcb;
2728 struct vmcb *hsave = svm->nested.hsave;
2729 struct vmcb *vmcb = svm->vmcb;
2730 struct page *page;
2731
2732 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
2733 vmcb->control.exit_info_1,
2734 vmcb->control.exit_info_2,
2735 vmcb->control.exit_int_info,
2736 vmcb->control.exit_int_info_err,
2737 KVM_ISA_SVM);
2738
2739 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
2740 if (!nested_vmcb)
2741 return 1;
2742
2743 /* Exit Guest-Mode */
2744 leave_guest_mode(&svm->vcpu);
2745 svm->nested.vmcb = 0;
2746
2747 /* Give the current vmcb to the guest */
2748 disable_gif(svm);
2749
2750 nested_vmcb->save.es = vmcb->save.es;
2751 nested_vmcb->save.cs = vmcb->save.cs;
2752 nested_vmcb->save.ss = vmcb->save.ss;
2753 nested_vmcb->save.ds = vmcb->save.ds;
2754 nested_vmcb->save.gdtr = vmcb->save.gdtr;
2755 nested_vmcb->save.idtr = vmcb->save.idtr;
2756 nested_vmcb->save.efer = svm->vcpu.arch.efer;
2757 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
2758 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
2759 nested_vmcb->save.cr2 = vmcb->save.cr2;
2760 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
2761 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
2762 nested_vmcb->save.rip = vmcb->save.rip;
2763 nested_vmcb->save.rsp = vmcb->save.rsp;
2764 nested_vmcb->save.rax = vmcb->save.rax;
2765 nested_vmcb->save.dr7 = vmcb->save.dr7;
2766 nested_vmcb->save.dr6 = vmcb->save.dr6;
2767 nested_vmcb->save.cpl = vmcb->save.cpl;
2768
2769 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
2770 nested_vmcb->control.int_vector = vmcb->control.int_vector;
2771 nested_vmcb->control.int_state = vmcb->control.int_state;
2772 nested_vmcb->control.exit_code = vmcb->control.exit_code;
2773 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
2774 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
2775 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
2776 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
2777 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
2778
2779 if (svm->nrips_enabled)
2780 nested_vmcb->control.next_rip = vmcb->control.next_rip;
2781
2782 /*
2783 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
2784 * to make sure that we do not lose injected events. So check event_inj
2785 * here and copy it to exit_int_info if it is valid.
2786 	 * Exit_int_info and event_inj can't both be valid because the case
2787 * below only happens on a VMRUN instruction intercept which has
2788 * no valid exit_int_info set.
2789 */
2790 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
2791 struct vmcb_control_area *nc = &nested_vmcb->control;
2792
2793 nc->exit_int_info = vmcb->control.event_inj;
2794 nc->exit_int_info_err = vmcb->control.event_inj_err;
2795 }
2796
2797 nested_vmcb->control.tlb_ctl = 0;
2798 nested_vmcb->control.event_inj = 0;
2799 nested_vmcb->control.event_inj_err = 0;
2800
2801 /* We always set V_INTR_MASKING and remember the old value in hflags */
2802 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2803 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
2804
2805 /* Restore the original control entries */
2806 copy_vmcb_control_area(vmcb, hsave);
2807
2808 kvm_clear_exception_queue(&svm->vcpu);
2809 kvm_clear_interrupt_queue(&svm->vcpu);
2810
2811 svm->nested.nested_cr3 = 0;
2812
2813 /* Restore selected save entries */
2814 svm->vmcb->save.es = hsave->save.es;
2815 svm->vmcb->save.cs = hsave->save.cs;
2816 svm->vmcb->save.ss = hsave->save.ss;
2817 svm->vmcb->save.ds = hsave->save.ds;
2818 svm->vmcb->save.gdtr = hsave->save.gdtr;
2819 svm->vmcb->save.idtr = hsave->save.idtr;
2820 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
2821 svm_set_efer(&svm->vcpu, hsave->save.efer);
2822 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
2823 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
2824 if (npt_enabled) {
2825 svm->vmcb->save.cr3 = hsave->save.cr3;
2826 svm->vcpu.arch.cr3 = hsave->save.cr3;
2827 } else {
2828 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
2829 }
2830 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
2831 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
2832 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
2833 svm->vmcb->save.dr7 = 0;
2834 svm->vmcb->save.cpl = 0;
2835 svm->vmcb->control.exit_int_info = 0;
2836
2837 mark_all_dirty(svm->vmcb);
2838
2839 nested_svm_unmap(page);
2840
2841 nested_svm_uninit_mmu_context(&svm->vcpu);
2842 kvm_mmu_reset_context(&svm->vcpu);
2843 kvm_mmu_load(&svm->vcpu);
2844
2845 return 0;
2846 }
2847
2848 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
2849 {
2850 /*
2851 * This function merges the msr permission bitmaps of kvm and the
2852 * nested vmcb. It is optimized in that it only merges the parts where
2853 * the kvm msr permission bitmap may contain zero bits
2854 */
2855 int i;
2856
2857 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2858 return true;
2859
2860 for (i = 0; i < MSRPM_OFFSETS; i++) {
2861 u32 value, p;
2862 u64 offset;
2863
2864 if (msrpm_offsets[i] == 0xffffffff)
2865 break;
2866
2867 p = msrpm_offsets[i];
2868 offset = svm->nested.vmcb_msrpm + (p * 4);
2869
2870 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
2871 return false;
2872
2873 svm->nested.msrpm[p] = svm->msrpm[p] | value;
2874 }
2875
2876 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
2877
2878 return true;
2879 }
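
/*
 * Merge rule illustrated: svm->nested.msrpm[p] is the OR of KVM's own
 * bitmap and L1's, so L2 gets direct access to an MSR only when both
 * KVM and L1 leave it unintercepted; a bit set by either side still
 * causes a #VMEXIT.
 */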
2880
2881 static bool nested_vmcb_checks(struct vmcb *vmcb)
2882 {
2883 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
2884 return false;
2885
2886 if (vmcb->control.asid == 0)
2887 return false;
2888
2889 if (vmcb->control.nested_ctl && !npt_enabled)
2890 return false;
2891
2892 return true;
2893 }
2894
2895 static bool nested_svm_vmrun(struct vcpu_svm *svm)
2896 {
2897 struct vmcb *nested_vmcb;
2898 struct vmcb *hsave = svm->nested.hsave;
2899 struct vmcb *vmcb = svm->vmcb;
2900 struct page *page;
2901 u64 vmcb_gpa;
2902
2903 vmcb_gpa = svm->vmcb->save.rax;
2904
2905 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2906 if (!nested_vmcb)
2907 return false;
2908
2909 if (!nested_vmcb_checks(nested_vmcb)) {
2910 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
2911 nested_vmcb->control.exit_code_hi = 0;
2912 nested_vmcb->control.exit_info_1 = 0;
2913 nested_vmcb->control.exit_info_2 = 0;
2914
2915 nested_svm_unmap(page);
2916
2917 return false;
2918 }
2919
2920 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
2921 nested_vmcb->save.rip,
2922 nested_vmcb->control.int_ctl,
2923 nested_vmcb->control.event_inj,
2924 nested_vmcb->control.nested_ctl);
2925
2926 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
2927 nested_vmcb->control.intercept_cr >> 16,
2928 nested_vmcb->control.intercept_exceptions,
2929 nested_vmcb->control.intercept);
2930
2931 /* Clear internal status */
2932 kvm_clear_exception_queue(&svm->vcpu);
2933 kvm_clear_interrupt_queue(&svm->vcpu);
2934
2935 /*
2936 * Save the old vmcb, so we don't need to pick what we save, but can
2937 * restore everything when a VMEXIT occurs
2938 */
2939 hsave->save.es = vmcb->save.es;
2940 hsave->save.cs = vmcb->save.cs;
2941 hsave->save.ss = vmcb->save.ss;
2942 hsave->save.ds = vmcb->save.ds;
2943 hsave->save.gdtr = vmcb->save.gdtr;
2944 hsave->save.idtr = vmcb->save.idtr;
2945 hsave->save.efer = svm->vcpu.arch.efer;
2946 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
2947 hsave->save.cr4 = svm->vcpu.arch.cr4;
2948 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
2949 hsave->save.rip = kvm_rip_read(&svm->vcpu);
2950 hsave->save.rsp = vmcb->save.rsp;
2951 hsave->save.rax = vmcb->save.rax;
2952 if (npt_enabled)
2953 hsave->save.cr3 = vmcb->save.cr3;
2954 else
2955 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
2956
2957 copy_vmcb_control_area(hsave, vmcb);
2958
2959 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
2960 svm->vcpu.arch.hflags |= HF_HIF_MASK;
2961 else
2962 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
2963
2964 if (nested_vmcb->control.nested_ctl) {
2965 kvm_mmu_unload(&svm->vcpu);
2966 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
2967 nested_svm_init_mmu_context(&svm->vcpu);
2968 }
2969
2970 /* Load the nested guest state */
2971 svm->vmcb->save.es = nested_vmcb->save.es;
2972 svm->vmcb->save.cs = nested_vmcb->save.cs;
2973 svm->vmcb->save.ss = nested_vmcb->save.ss;
2974 svm->vmcb->save.ds = nested_vmcb->save.ds;
2975 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
2976 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
2977 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
2978 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
2979 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
2980 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
2981 if (npt_enabled) {
2982 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
2983 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
2984 } else
2985 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
2986
2987 /* Guest paging mode is active - reset mmu */
2988 kvm_mmu_reset_context(&svm->vcpu);
2989
2990 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
2991 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
2992 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
2993 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
2994
2995 /* In case we don't even reach vcpu_run, the fields are not updated */
2996 svm->vmcb->save.rax = nested_vmcb->save.rax;
2997 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
2998 svm->vmcb->save.rip = nested_vmcb->save.rip;
2999 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
3000 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
3001 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
3002
3003 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
3004 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
3005
3006 /* cache intercepts */
3007 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
3008 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
3009 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
3010 svm->nested.intercept = nested_vmcb->control.intercept;
3011
3012 svm_flush_tlb(&svm->vcpu);
3013 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
3014 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
3015 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
3016 else
3017 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
3018
3019 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
3020 /* We only want the cr8 intercept bits of the guest */
3021 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
3022 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3023 }
3024
3025 /* We don't want to see VMMCALLs from a nested guest */
3026 clr_intercept(svm, INTERCEPT_VMMCALL);
3027
3028 svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
3029 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
3030 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
3031 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
3032 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
3033 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
3034
3035 nested_svm_unmap(page);
3036
3037 /* Enter Guest-Mode */
3038 enter_guest_mode(&svm->vcpu);
3039
3040 /*
3041 * Merge guest and host intercepts - must be called with vcpu in
3042 	 * guest-mode to take effect here
3043 */
3044 recalc_intercepts(svm);
3045
3046 svm->nested.vmcb = vmcb_gpa;
3047
3048 enable_gif(svm);
3049
3050 mark_all_dirty(svm->vmcb);
3051
3052 return true;
3053 }
3054
3055 static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
3056 {
3057 to_vmcb->save.fs = from_vmcb->save.fs;
3058 to_vmcb->save.gs = from_vmcb->save.gs;
3059 to_vmcb->save.tr = from_vmcb->save.tr;
3060 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
3061 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
3062 to_vmcb->save.star = from_vmcb->save.star;
3063 to_vmcb->save.lstar = from_vmcb->save.lstar;
3064 to_vmcb->save.cstar = from_vmcb->save.cstar;
3065 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
3066 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
3067 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
3068 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
3069 }
3070
3071 static int vmload_interception(struct vcpu_svm *svm)
3072 {
3073 struct vmcb *nested_vmcb;
3074 struct page *page;
3075 int ret;
3076
3077 if (nested_svm_check_permissions(svm))
3078 return 1;
3079
3080 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
3081 if (!nested_vmcb)
3082 return 1;
3083
3084 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3085 ret = kvm_skip_emulated_instruction(&svm->vcpu);
3086
3087 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
3088 nested_svm_unmap(page);
3089
3090 return ret;
3091 }
3092
3093 static int vmsave_interception(struct vcpu_svm *svm)
3094 {
3095 struct vmcb *nested_vmcb;
3096 struct page *page;
3097 int ret;
3098
3099 if (nested_svm_check_permissions(svm))
3100 return 1;
3101
3102 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
3103 if (!nested_vmcb)
3104 return 1;
3105
3106 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3107 ret = kvm_skip_emulated_instruction(&svm->vcpu);
3108
3109 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
3110 nested_svm_unmap(page);
3111
3112 return ret;
3113 }
3114
3115 static int vmrun_interception(struct vcpu_svm *svm)
3116 {
3117 if (nested_svm_check_permissions(svm))
3118 return 1;
3119
3120 /* Save rip after vmrun instruction */
3121 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
3122
3123 if (!nested_svm_vmrun(svm))
3124 return 1;
3125
3126 if (!nested_svm_vmrun_msrpm(svm))
3127 goto failed;
3128
3129 return 1;
3130
3131 failed:
3132
3133 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
3134 svm->vmcb->control.exit_code_hi = 0;
3135 svm->vmcb->control.exit_info_1 = 0;
3136 svm->vmcb->control.exit_info_2 = 0;
3137
3138 nested_svm_vmexit(svm);
3139
3140 return 1;
3141 }
3142
3143 static int stgi_interception(struct vcpu_svm *svm)
3144 {
3145 int ret;
3146
3147 if (nested_svm_check_permissions(svm))
3148 return 1;
3149
3150 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3151 ret = kvm_skip_emulated_instruction(&svm->vcpu);
3152 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3153
3154 enable_gif(svm);
3155
3156 return ret;
3157 }
3158
3159 static int clgi_interception(struct vcpu_svm *svm)
3160 {
3161 int ret;
3162
3163 if (nested_svm_check_permissions(svm))
3164 return 1;
3165
3166 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3167 ret = kvm_skip_emulated_instruction(&svm->vcpu);
3168
3169 disable_gif(svm);
3170
3171 /* After a CLGI no interrupts should come */
3172 if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
3173 svm_clear_vintr(svm);
3174 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3175 mark_dirty(svm->vmcb, VMCB_INTR);
3176 }
3177
3178 return ret;
3179 }
3180
3181 static int invlpga_interception(struct vcpu_svm *svm)
3182 {
3183 struct kvm_vcpu *vcpu = &svm->vcpu;
3184
3185 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
3186 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
3187
3188 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
3189 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
3190
3191 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3192 return kvm_skip_emulated_instruction(&svm->vcpu);
3193 }
3194
3195 static int skinit_interception(struct vcpu_svm *svm)
3196 {
3197 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
3198
3199 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3200 return 1;
3201 }
3202
3203 static int wbinvd_interception(struct vcpu_svm *svm)
3204 {
3205 return kvm_emulate_wbinvd(&svm->vcpu);
3206 }
3207
3208 static int xsetbv_interception(struct vcpu_svm *svm)
3209 {
3210 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
3211 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3212
3213 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
3214 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3215 return kvm_skip_emulated_instruction(&svm->vcpu);
3216 }
3217
3218 return 1;
3219 }
3220
3221 static int task_switch_interception(struct vcpu_svm *svm)
3222 {
3223 u16 tss_selector;
3224 int reason;
3225 int int_type = svm->vmcb->control.exit_int_info &
3226 SVM_EXITINTINFO_TYPE_MASK;
3227 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
3228 uint32_t type =
3229 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
3230 uint32_t idt_v =
3231 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
3232 bool has_error_code = false;
3233 u32 error_code = 0;
3234
3235 tss_selector = (u16)svm->vmcb->control.exit_info_1;
3236
3237 if (svm->vmcb->control.exit_info_2 &
3238 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
3239 reason = TASK_SWITCH_IRET;
3240 else if (svm->vmcb->control.exit_info_2 &
3241 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
3242 reason = TASK_SWITCH_JMP;
3243 else if (idt_v)
3244 reason = TASK_SWITCH_GATE;
3245 else
3246 reason = TASK_SWITCH_CALL;
3247
3248 if (reason == TASK_SWITCH_GATE) {
3249 switch (type) {
3250 case SVM_EXITINTINFO_TYPE_NMI:
3251 svm->vcpu.arch.nmi_injected = false;
3252 break;
3253 case SVM_EXITINTINFO_TYPE_EXEPT:
3254 if (svm->vmcb->control.exit_info_2 &
3255 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
3256 has_error_code = true;
3257 error_code =
3258 (u32)svm->vmcb->control.exit_info_2;
3259 }
3260 kvm_clear_exception_queue(&svm->vcpu);
3261 break;
3262 case SVM_EXITINTINFO_TYPE_INTR:
3263 kvm_clear_interrupt_queue(&svm->vcpu);
3264 break;
3265 default:
3266 break;
3267 }
3268 }
3269
3270 if (reason != TASK_SWITCH_GATE ||
3271 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
3272 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
3273 (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
3274 skip_emulated_instruction(&svm->vcpu);
3275
3276 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
3277 int_vec = -1;
3278
3279 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
3280 has_error_code, error_code) == EMULATE_FAIL) {
3281 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3282 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3283 svm->vcpu.run->internal.ndata = 0;
3284 return 0;
3285 }
3286 return 1;
3287 }
3288
3289 static int cpuid_interception(struct vcpu_svm *svm)
3290 {
3291 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3292 return kvm_emulate_cpuid(&svm->vcpu);
3293 }
3294
3295 static int iret_interception(struct vcpu_svm *svm)
3296 {
3297 ++svm->vcpu.stat.nmi_window_exits;
3298 clr_intercept(svm, INTERCEPT_IRET);
3299 svm->vcpu.arch.hflags |= HF_IRET_MASK;
3300 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
3301 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3302 return 1;
3303 }
3304
3305 static int invlpg_interception(struct vcpu_svm *svm)
3306 {
3307 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3308 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3309
3310 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
3311 return kvm_skip_emulated_instruction(&svm->vcpu);
3312 }
3313
3314 static int emulate_on_interception(struct vcpu_svm *svm)
3315 {
3316 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3317 }
3318
3319 static int rdpmc_interception(struct vcpu_svm *svm)
3320 {
3321 int err;
3322
3323 if (!static_cpu_has(X86_FEATURE_NRIPS))
3324 return emulate_on_interception(svm);
3325
3326 err = kvm_rdpmc(&svm->vcpu);
3327 return kvm_complete_insn_gp(&svm->vcpu, err);
3328 }
3329
3330 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
3331 unsigned long val)
3332 {
3333 unsigned long cr0 = svm->vcpu.arch.cr0;
3334 bool ret = false;
3335 u64 intercept;
3336
3337 intercept = svm->nested.intercept;
3338
3339 if (!is_guest_mode(&svm->vcpu) ||
3340 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
3341 return false;
3342
3343 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
3344 val &= ~SVM_CR0_SELECTIVE_MASK;
3345
3346 if (cr0 ^ val) {
3347 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3348 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
3349 }
3350
3351 return ret;
3352 }
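
/*
 * Example of the check above: since SVM_CR0_SELECTIVE_MASK covers
 * CR0.TS and CR0.MP and both are masked out on each side, a
 * mov-to-CR0 that only toggles TS or MP does not trigger a
 * selective-CR0 #VMEXIT to L1; a change to any other CR0 bit does,
 * provided L1 set INTERCEPT_SELECTIVE_CR0.
 */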
3353
3354 #define CR_VALID (1ULL << 63)
3355
3356 static int cr_interception(struct vcpu_svm *svm)
3357 {
3358 int reg, cr;
3359 unsigned long val;
3360 int err;
3361
3362 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3363 return emulate_on_interception(svm);
3364
3365 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
3366 return emulate_on_interception(svm);
3367
3368 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3369 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
3370 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
3371 else
3372 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
3373
3374 err = 0;
3375 if (cr >= 16) { /* mov to cr */
3376 cr -= 16;
3377 val = kvm_register_read(&svm->vcpu, reg);
3378 switch (cr) {
3379 case 0:
3380 if (!check_selective_cr0_intercepted(svm, val))
3381 err = kvm_set_cr0(&svm->vcpu, val);
3382 else
3383 return 1;
3384
3385 break;
3386 case 3:
3387 err = kvm_set_cr3(&svm->vcpu, val);
3388 break;
3389 case 4:
3390 err = kvm_set_cr4(&svm->vcpu, val);
3391 break;
3392 case 8:
3393 err = kvm_set_cr8(&svm->vcpu, val);
3394 break;
3395 default:
3396 WARN(1, "unhandled write to CR%d", cr);
3397 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3398 return 1;
3399 }
3400 } else { /* mov from cr */
3401 switch (cr) {
3402 case 0:
3403 val = kvm_read_cr0(&svm->vcpu);
3404 break;
3405 case 2:
3406 val = svm->vcpu.arch.cr2;
3407 break;
3408 case 3:
3409 val = kvm_read_cr3(&svm->vcpu);
3410 break;
3411 case 4:
3412 val = kvm_read_cr4(&svm->vcpu);
3413 break;
3414 case 8:
3415 val = kvm_get_cr8(&svm->vcpu);
3416 break;
3417 default:
3418 WARN(1, "unhandled read from CR%d", cr);
3419 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3420 return 1;
3421 }
3422 kvm_register_write(&svm->vcpu, reg, val);
3423 }
3424 return kvm_complete_insn_gp(&svm->vcpu, err);
3425 }
3426
3427 static int dr_interception(struct vcpu_svm *svm)
3428 {
3429 int reg, dr;
3430 unsigned long val;
3431
3432 if (svm->vcpu.guest_debug == 0) {
3433 /*
3434 * No more DR vmexits; force a reload of the debug registers
3435 * and reenter on this instruction. The next vmexit will
3436 * retrieve the full state of the debug registers.
3437 */
3438 clr_dr_intercepts(svm);
3439 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
3440 return 1;
3441 }
3442
3443 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
3444 return emulate_on_interception(svm);
3445
3446 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3447 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
3448
3449 if (dr >= 16) { /* mov to DRn */
3450 if (!kvm_require_dr(&svm->vcpu, dr - 16))
3451 return 1;
3452 val = kvm_register_read(&svm->vcpu, reg);
3453 kvm_set_dr(&svm->vcpu, dr - 16, val);
3454 } else {
3455 if (!kvm_require_dr(&svm->vcpu, dr))
3456 return 1;
3457 kvm_get_dr(&svm->vcpu, dr, &val);
3458 kvm_register_write(&svm->vcpu, reg, val);
3459 }
3460
3461 return kvm_skip_emulated_instruction(&svm->vcpu);
3462 }
3463
3464 static int cr8_write_interception(struct vcpu_svm *svm)
3465 {
3466 struct kvm_run *kvm_run = svm->vcpu.run;
3467 int r;
3468
3469 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
3470 /* instruction emulation calls kvm_set_cr8() */
3471 r = cr_interception(svm);
3472 if (lapic_in_kernel(&svm->vcpu))
3473 return r;
3474 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
3475 return r;
3476 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
3477 return 0;
3478 }
3479
3480 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3481 {
3482 struct vcpu_svm *svm = to_svm(vcpu);
3483
3484 switch (msr_info->index) {
3485 case MSR_IA32_TSC: {
3486 msr_info->data = svm->vmcb->control.tsc_offset +
3487 kvm_scale_tsc(vcpu, rdtsc());
3488
3489 break;
3490 }
3491 case MSR_STAR:
3492 msr_info->data = svm->vmcb->save.star;
3493 break;
3494 #ifdef CONFIG_X86_64
3495 case MSR_LSTAR:
3496 msr_info->data = svm->vmcb->save.lstar;
3497 break;
3498 case MSR_CSTAR:
3499 msr_info->data = svm->vmcb->save.cstar;
3500 break;
3501 case MSR_KERNEL_GS_BASE:
3502 msr_info->data = svm->vmcb->save.kernel_gs_base;
3503 break;
3504 case MSR_SYSCALL_MASK:
3505 msr_info->data = svm->vmcb->save.sfmask;
3506 break;
3507 #endif
3508 case MSR_IA32_SYSENTER_CS:
3509 msr_info->data = svm->vmcb->save.sysenter_cs;
3510 break;
3511 case MSR_IA32_SYSENTER_EIP:
3512 msr_info->data = svm->sysenter_eip;
3513 break;
3514 case MSR_IA32_SYSENTER_ESP:
3515 msr_info->data = svm->sysenter_esp;
3516 break;
3517 case MSR_TSC_AUX:
3518 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
3519 return 1;
3520 msr_info->data = svm->tsc_aux;
3521 break;
3522 /*
3523 * Nobody will change the following 5 values in the VMCB so we can
3524 * safely return them on rdmsr. They will always be 0 until LBRV is
3525 * implemented.
3526 */
3527 case MSR_IA32_DEBUGCTLMSR:
3528 msr_info->data = svm->vmcb->save.dbgctl;
3529 break;
3530 case MSR_IA32_LASTBRANCHFROMIP:
3531 msr_info->data = svm->vmcb->save.br_from;
3532 break;
3533 case MSR_IA32_LASTBRANCHTOIP:
3534 msr_info->data = svm->vmcb->save.br_to;
3535 break;
3536 case MSR_IA32_LASTINTFROMIP:
3537 msr_info->data = svm->vmcb->save.last_excp_from;
3538 break;
3539 case MSR_IA32_LASTINTTOIP:
3540 msr_info->data = svm->vmcb->save.last_excp_to;
3541 break;
3542 case MSR_VM_HSAVE_PA:
3543 msr_info->data = svm->nested.hsave_msr;
3544 break;
3545 case MSR_VM_CR:
3546 msr_info->data = svm->nested.vm_cr_msr;
3547 break;
3548 case MSR_IA32_UCODE_REV:
3549 msr_info->data = 0x01000065;
3550 break;
3551 case MSR_F15H_IC_CFG: {
3552
3553 int family, model;
3554
3555 family = guest_cpuid_family(vcpu);
3556 model = guest_cpuid_model(vcpu);
3557
3558 if (family < 0 || model < 0)
3559 return kvm_get_msr_common(vcpu, msr_info);
3560
3561 msr_info->data = 0;
3562
3563 if (family == 0x15 &&
3564 (model >= 0x2 && model < 0x20))
3565 msr_info->data = 0x1E;
3566 }
3567 break;
3568 default:
3569 return kvm_get_msr_common(vcpu, msr_info);
3570 }
3571 return 0;
3572 }
3573
3574 static int rdmsr_interception(struct vcpu_svm *svm)
3575 {
3576 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3577 struct msr_data msr_info;
3578
3579 msr_info.index = ecx;
3580 msr_info.host_initiated = false;
3581 if (svm_get_msr(&svm->vcpu, &msr_info)) {
3582 trace_kvm_msr_read_ex(ecx);
3583 kvm_inject_gp(&svm->vcpu, 0);
3584 return 1;
3585 } else {
3586 trace_kvm_msr_read(ecx, msr_info.data);
3587
3588 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
3589 msr_info.data & 0xffffffff);
3590 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
3591 msr_info.data >> 32);
3592 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3593 return kvm_skip_emulated_instruction(&svm->vcpu);
3594 }
3595 }
3596
3597 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
3598 {
3599 struct vcpu_svm *svm = to_svm(vcpu);
3600 int svm_dis, chg_mask;
3601
3602 if (data & ~SVM_VM_CR_VALID_MASK)
3603 return 1;
3604
3605 chg_mask = SVM_VM_CR_VALID_MASK;
3606
3607 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
3608 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
3609
3610 svm->nested.vm_cr_msr &= ~chg_mask;
3611 svm->nested.vm_cr_msr |= (data & chg_mask);
3612
3613 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
3614
3615 /* check for svm_disable while efer.svme is set */
3616 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
3617 return 1;
3618
3619 return 0;
3620 }
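
/*
 * Lock semantics of the code above: once the guest has set
 * SVM_VM_CR_SVM_DIS_MASK, both the LOCK and DIS bits drop out of
 * chg_mask, so the setting becomes sticky; setting DIS while
 * EFER.SVME is already enabled is rejected (the wrmsr path then
 * injects a #GP).
 */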
3621
3622 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
3623 {
3624 struct vcpu_svm *svm = to_svm(vcpu);
3625
3626 u32 ecx = msr->index;
3627 u64 data = msr->data;
3628 switch (ecx) {
3629 case MSR_IA32_TSC:
3630 kvm_write_tsc(vcpu, msr);
3631 break;
3632 case MSR_STAR:
3633 svm->vmcb->save.star = data;
3634 break;
3635 #ifdef CONFIG_X86_64
3636 case MSR_LSTAR:
3637 svm->vmcb->save.lstar = data;
3638 break;
3639 case MSR_CSTAR:
3640 svm->vmcb->save.cstar = data;
3641 break;
3642 case MSR_KERNEL_GS_BASE:
3643 svm->vmcb->save.kernel_gs_base = data;
3644 break;
3645 case MSR_SYSCALL_MASK:
3646 svm->vmcb->save.sfmask = data;
3647 break;
3648 #endif
3649 case MSR_IA32_SYSENTER_CS:
3650 svm->vmcb->save.sysenter_cs = data;
3651 break;
3652 case MSR_IA32_SYSENTER_EIP:
3653 svm->sysenter_eip = data;
3654 svm->vmcb->save.sysenter_eip = data;
3655 break;
3656 case MSR_IA32_SYSENTER_ESP:
3657 svm->sysenter_esp = data;
3658 svm->vmcb->save.sysenter_esp = data;
3659 break;
3660 case MSR_TSC_AUX:
3661 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
3662 return 1;
3663
3664 /*
3665 * This is rare, so we update the MSR here instead of using
3666 * direct_access_msrs. Doing that would require a rdmsr in
3667 * svm_vcpu_put.
3668 */
3669 svm->tsc_aux = data;
3670 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
3671 break;
3672 case MSR_IA32_DEBUGCTLMSR:
3673 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
3674 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
3675 __func__, data);
3676 break;
3677 }
3678 if (data & DEBUGCTL_RESERVED_BITS)
3679 return 1;
3680
3681 svm->vmcb->save.dbgctl = data;
3682 mark_dirty(svm->vmcb, VMCB_LBR);
3683 if (data & (1ULL<<0))
3684 svm_enable_lbrv(svm);
3685 else
3686 svm_disable_lbrv(svm);
3687 break;
3688 case MSR_VM_HSAVE_PA:
3689 svm->nested.hsave_msr = data;
3690 break;
3691 case MSR_VM_CR:
3692 return svm_set_vm_cr(vcpu, data);
3693 case MSR_VM_IGNNE:
3694 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3695 break;
3696 case MSR_IA32_APICBASE:
3697 if (kvm_vcpu_apicv_active(vcpu))
3698 avic_update_vapic_bar(to_svm(vcpu), data);
3699 		/* Fall through */
3700 default:
3701 return kvm_set_msr_common(vcpu, msr);
3702 }
3703 return 0;
3704 }
3705
3706 static int wrmsr_interception(struct vcpu_svm *svm)
3707 {
3708 struct msr_data msr;
3709 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3710 u64 data = kvm_read_edx_eax(&svm->vcpu);
3711
3712 msr.data = data;
3713 msr.index = ecx;
3714 msr.host_initiated = false;
3715
3716 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3717 if (kvm_set_msr(&svm->vcpu, &msr)) {
3718 trace_kvm_msr_write_ex(ecx, data);
3719 kvm_inject_gp(&svm->vcpu, 0);
3720 return 1;
3721 } else {
3722 trace_kvm_msr_write(ecx, data);
3723 return kvm_skip_emulated_instruction(&svm->vcpu);
3724 }
3725 }
3726
3727 static int msr_interception(struct vcpu_svm *svm)
3728 {
3729 if (svm->vmcb->control.exit_info_1)
3730 return wrmsr_interception(svm);
3731 else
3732 return rdmsr_interception(svm);
3733 }
3734
3735 static int interrupt_window_interception(struct vcpu_svm *svm)
3736 {
3737 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3738 svm_clear_vintr(svm);
3739 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3740 mark_dirty(svm->vmcb, VMCB_INTR);
3741 ++svm->vcpu.stat.irq_window_exits;
3742 return 1;
3743 }
3744
3745 static int pause_interception(struct vcpu_svm *svm)
3746 {
3747 kvm_vcpu_on_spin(&(svm->vcpu));
3748 return 1;
3749 }
3750
3751 static int nop_interception(struct vcpu_svm *svm)
3752 {
3753 return kvm_skip_emulated_instruction(&(svm->vcpu));
3754 }
3755
3756 static int monitor_interception(struct vcpu_svm *svm)
3757 {
3758 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
3759 return nop_interception(svm);
3760 }
3761
3762 static int mwait_interception(struct vcpu_svm *svm)
3763 {
3764 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
3765 return nop_interception(svm);
3766 }
3767
3768 enum avic_ipi_failure_cause {
3769 AVIC_IPI_FAILURE_INVALID_INT_TYPE,
3770 AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
3771 AVIC_IPI_FAILURE_INVALID_TARGET,
3772 AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
3773 };
3774
3775 static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
3776 {
3777 u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
3778 u32 icrl = svm->vmcb->control.exit_info_1;
3779 u32 id = svm->vmcb->control.exit_info_2 >> 32;
3780 u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
3781 struct kvm_lapic *apic = svm->vcpu.arch.apic;
3782
3783 trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
3784
3785 switch (id) {
3786 case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
3787 /*
3788 * AVIC hardware handles the generation of
3789 * IPIs when the specified Message Type is Fixed
3790 * (also known as fixed delivery mode) and
3791 * the Trigger Mode is edge-triggered. The hardware
3792 * also supports self and broadcast delivery modes
3793 		 * specified via the Destination Shorthand (DSH)
3794 * field of the ICRL. Logical and physical APIC ID
3795 * formats are supported. All other IPI types cause
3796 		 * a #VMEXIT, which needs to be emulated.
3797 */
3798 kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
3799 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
3800 break;
3801 case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
3802 int i;
3803 struct kvm_vcpu *vcpu;
3804 struct kvm *kvm = svm->vcpu.kvm;
3805 struct kvm_lapic *apic = svm->vcpu.arch.apic;
3806
3807 /*
3808 * At this point, we expect that the AVIC HW has already
3809 * set the appropriate IRR bits on the valid target
3810 * vcpus. So, we just need to kick the appropriate vcpu.
3811 */
3812 kvm_for_each_vcpu(i, vcpu, kvm) {
3813 bool m = kvm_apic_match_dest(vcpu, apic,
3814 icrl & KVM_APIC_SHORT_MASK,
3815 GET_APIC_DEST_FIELD(icrh),
3816 icrl & KVM_APIC_DEST_MASK);
3817
3818 if (m && !avic_vcpu_is_running(vcpu))
3819 kvm_vcpu_wake_up(vcpu);
3820 }
3821 break;
3822 }
3823 case AVIC_IPI_FAILURE_INVALID_TARGET:
3824 break;
3825 case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
3826 WARN_ONCE(1, "Invalid backing page\n");
3827 break;
3828 default:
3829 pr_err("Unknown IPI interception\n");
3830 }
3831
3832 return 1;
3833 }
3834
3835 static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
3836 {
3837 struct kvm_arch *vm_data = &vcpu->kvm->arch;
3838 int index;
3839 u32 *logical_apic_id_table;
3840 int dlid = GET_APIC_LOGICAL_ID(ldr);
3841
3842 if (!dlid)
3843 return NULL;
3844
3845 if (flat) { /* flat */
3846 index = ffs(dlid) - 1;
3847 if (index > 7)
3848 return NULL;
3849 } else { /* cluster */
3850 int cluster = (dlid & 0xf0) >> 4;
3851 int apic = ffs(dlid & 0x0f) - 1;
3852
3853 if ((apic < 0) || (apic > 7) ||
3854 (cluster >= 0xf))
3855 return NULL;
3856 index = (cluster << 2) + apic;
3857 }
3858
3859 logical_apic_id_table = (u32 *) page_address(vm_data->avic_logical_id_table_page);
3860
3861 return &logical_apic_id_table[index];
3862 }
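
/*
 * Index computation illustrated: in flat mode a logical ID of 0x20
 * (bit 5) selects entry 5. In cluster mode a logical ID of 0x21
 * (cluster 2, APIC bit 0) selects entry (2 << 2) + 0 == 8.
 */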
3863
3864 static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr,
3865 bool valid)
3866 {
3867 bool flat;
3868 u32 *entry, new_entry;
3869
3870 flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
3871 entry = avic_get_logical_id_entry(vcpu, ldr, flat);
3872 if (!entry)
3873 return -EINVAL;
3874
3875 new_entry = READ_ONCE(*entry);
3876 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
3877 new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
3878 if (valid)
3879 new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
3880 else
3881 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
3882 WRITE_ONCE(*entry, new_entry);
3883
3884 return 0;
3885 }
3886
3887 static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
3888 {
3889 int ret;
3890 struct vcpu_svm *svm = to_svm(vcpu);
3891 u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
3892
3893 if (!ldr)
3894 return 1;
3895
3896 ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr, true);
3897 if (ret && svm->ldr_reg) {
3898 avic_ldr_write(vcpu, 0, svm->ldr_reg, false);
3899 svm->ldr_reg = 0;
3900 } else {
3901 svm->ldr_reg = ldr;
3902 }
3903 return ret;
3904 }
3905
3906 static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
3907 {
3908 u64 *old, *new;
3909 struct vcpu_svm *svm = to_svm(vcpu);
3910 u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
3911 u32 id = (apic_id_reg >> 24) & 0xff;
3912
3913 if (vcpu->vcpu_id == id)
3914 return 0;
3915
3916 old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
3917 new = avic_get_physical_id_entry(vcpu, id);
3918 if (!new || !old)
3919 return 1;
3920
3921 	/* We need to move physical_id_entry to the new offset */
3922 *new = *old;
3923 *old = 0ULL;
3924 to_svm(vcpu)->avic_physical_id_cache = new;
3925
3926 /*
3927 * Also update the guest physical APIC ID in the logical
3928 	 * APIC ID table entry if the LDR has already been set up.
3929 */
3930 if (svm->ldr_reg)
3931 avic_handle_ldr_update(vcpu);
3932
3933 return 0;
3934 }
3935
3936 static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
3937 {
3938 struct vcpu_svm *svm = to_svm(vcpu);
3939 struct kvm_arch *vm_data = &vcpu->kvm->arch;
3940 u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
3941 u32 mod = (dfr >> 28) & 0xf;
3942
3943 /*
3944 * We assume that all local APICs are using the same
3945 * destination format (DFR) mode. If this changes, we need
3946 * to flush the AVIC logical APIC ID table.
3947 */
3948 if (vm_data->ldr_mode == mod)
3949 return 0;
3950
3951 clear_page(page_address(vm_data->avic_logical_id_table_page));
3952 vm_data->ldr_mode = mod;
3953
3954 if (svm->ldr_reg)
3955 avic_handle_ldr_update(vcpu);
3956 return 0;
3957 }
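/*
 * For reference (annotation): the local APIC encodes the destination
 * format in DFR[31:28], so mod is 0xf for flat mode (APIC_DFR_FLAT =
 * 0xffffffff) and 0x0 for cluster mode (APIC_DFR_CLUSTER = 0x0fffffff).
 * Any switch between the two invalidates every logical ID entry, hence
 * the clear_page() above.
 */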
3958
3959 static int avic_unaccel_trap_write(struct vcpu_svm *svm)
3960 {
3961 struct kvm_lapic *apic = svm->vcpu.arch.apic;
3962 u32 offset = svm->vmcb->control.exit_info_1 &
3963 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
3964
3965 switch (offset) {
3966 case APIC_ID:
3967 if (avic_handle_apic_id_update(&svm->vcpu))
3968 return 0;
3969 break;
3970 case APIC_LDR:
3971 if (avic_handle_ldr_update(&svm->vcpu))
3972 return 0;
3973 break;
3974 case APIC_DFR:
3975 avic_handle_dfr_update(&svm->vcpu);
3976 break;
3977 default:
3978 break;
3979 }
3980
3981 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
3982
3983 return 1;
3984 }
3985
3986 static bool is_avic_unaccelerated_access_trap(u32 offset)
3987 {
3988 bool ret = false;
3989
3990 switch (offset) {
3991 case APIC_ID:
3992 case APIC_EOI:
3993 case APIC_RRR:
3994 case APIC_LDR:
3995 case APIC_DFR:
3996 case APIC_SPIV:
3997 case APIC_ESR:
3998 case APIC_ICR:
3999 case APIC_LVTT:
4000 case APIC_LVTTHMR:
4001 case APIC_LVTPC:
4002 case APIC_LVT0:
4003 case APIC_LVT1:
4004 case APIC_LVTERR:
4005 case APIC_TMICT:
4006 case APIC_TDCR:
4007 ret = true;
4008 break;
4009 default:
4010 break;
4011 }
4012 return ret;
4013 }
4014
4015 static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
4016 {
4017 int ret = 0;
4018 u32 offset = svm->vmcb->control.exit_info_1 &
4019 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4020 u32 vector = svm->vmcb->control.exit_info_2 &
4021 AVIC_UNACCEL_ACCESS_VECTOR_MASK;
4022 bool write = (svm->vmcb->control.exit_info_1 >> 32) &
4023 AVIC_UNACCEL_ACCESS_WRITE_MASK;
4024 bool trap = is_avic_unaccelerated_access_trap(offset);
4025
4026 trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
4027 trap, write, vector);
4028 if (trap) {
4029 /* Handling Trap */
4030 WARN_ONCE(!write, "svm: Handling trap read.\n");
4031 ret = avic_unaccel_trap_write(svm);
4032 } else {
4033 /* Handling Fault */
4034 ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
4035 }
4036
4037 return ret;
4038 }
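/*
 * Illustrative decode of the exit info consumed above (annotation):
 * with exit_info_1 = 0x1000000d0, the offset field (bits 11:4 per
 * AVIC_UNACCEL_ACCESS_OFFSET_MASK) is 0x0d0 (APIC_LDR) and the write
 * bit (bit 32) is set, so this was a trapped guest write to the LDR
 * and avic_unaccel_trap_write() handles it.
 */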
4039
4040 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
4041 [SVM_EXIT_READ_CR0] = cr_interception,
4042 [SVM_EXIT_READ_CR3] = cr_interception,
4043 [SVM_EXIT_READ_CR4] = cr_interception,
4044 [SVM_EXIT_READ_CR8] = cr_interception,
4045 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
4046 [SVM_EXIT_WRITE_CR0] = cr_interception,
4047 [SVM_EXIT_WRITE_CR3] = cr_interception,
4048 [SVM_EXIT_WRITE_CR4] = cr_interception,
4049 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
4050 [SVM_EXIT_READ_DR0] = dr_interception,
4051 [SVM_EXIT_READ_DR1] = dr_interception,
4052 [SVM_EXIT_READ_DR2] = dr_interception,
4053 [SVM_EXIT_READ_DR3] = dr_interception,
4054 [SVM_EXIT_READ_DR4] = dr_interception,
4055 [SVM_EXIT_READ_DR5] = dr_interception,
4056 [SVM_EXIT_READ_DR6] = dr_interception,
4057 [SVM_EXIT_READ_DR7] = dr_interception,
4058 [SVM_EXIT_WRITE_DR0] = dr_interception,
4059 [SVM_EXIT_WRITE_DR1] = dr_interception,
4060 [SVM_EXIT_WRITE_DR2] = dr_interception,
4061 [SVM_EXIT_WRITE_DR3] = dr_interception,
4062 [SVM_EXIT_WRITE_DR4] = dr_interception,
4063 [SVM_EXIT_WRITE_DR5] = dr_interception,
4064 [SVM_EXIT_WRITE_DR6] = dr_interception,
4065 [SVM_EXIT_WRITE_DR7] = dr_interception,
4066 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
4067 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
4068 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
4069 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
4070 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
4071 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
4072 [SVM_EXIT_INTR] = intr_interception,
4073 [SVM_EXIT_NMI] = nmi_interception,
4074 [SVM_EXIT_SMI] = nop_on_interception,
4075 [SVM_EXIT_INIT] = nop_on_interception,
4076 [SVM_EXIT_VINTR] = interrupt_window_interception,
4077 [SVM_EXIT_RDPMC] = rdpmc_interception,
4078 [SVM_EXIT_CPUID] = cpuid_interception,
4079 [SVM_EXIT_IRET] = iret_interception,
4080 [SVM_EXIT_INVD] = emulate_on_interception,
4081 [SVM_EXIT_PAUSE] = pause_interception,
4082 [SVM_EXIT_HLT] = halt_interception,
4083 [SVM_EXIT_INVLPG] = invlpg_interception,
4084 [SVM_EXIT_INVLPGA] = invlpga_interception,
4085 [SVM_EXIT_IOIO] = io_interception,
4086 [SVM_EXIT_MSR] = msr_interception,
4087 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
4088 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
4089 [SVM_EXIT_VMRUN] = vmrun_interception,
4090 [SVM_EXIT_VMMCALL] = vmmcall_interception,
4091 [SVM_EXIT_VMLOAD] = vmload_interception,
4092 [SVM_EXIT_VMSAVE] = vmsave_interception,
4093 [SVM_EXIT_STGI] = stgi_interception,
4094 [SVM_EXIT_CLGI] = clgi_interception,
4095 [SVM_EXIT_SKINIT] = skinit_interception,
4096 [SVM_EXIT_WBINVD] = wbinvd_interception,
4097 [SVM_EXIT_MONITOR] = monitor_interception,
4098 [SVM_EXIT_MWAIT] = mwait_interception,
4099 [SVM_EXIT_XSETBV] = xsetbv_interception,
4100 [SVM_EXIT_NPF] = pf_interception,
4101 [SVM_EXIT_RSM] = emulate_on_interception,
4102 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
4103 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
4104 };
4105
4106 static void dump_vmcb(struct kvm_vcpu *vcpu)
4107 {
4108 struct vcpu_svm *svm = to_svm(vcpu);
4109 struct vmcb_control_area *control = &svm->vmcb->control;
4110 struct vmcb_save_area *save = &svm->vmcb->save;
4111
4112 pr_err("VMCB Control Area:\n");
4113 pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
4114 pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
4115 pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
4116 pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
4117 pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
4118 pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
4119 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
4120 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
4121 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
4122 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
4123 pr_err("%-20s%d\n", "asid:", control->asid);
4124 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
4125 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
4126 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
4127 pr_err("%-20s%08x\n", "int_state:", control->int_state);
4128 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
4129 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
4130 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
4131 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
4132 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
4133 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
4134 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
4135 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
4136 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
4137 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
4138 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
4139 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
4140 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
4141 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
4142 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
4143 pr_err("VMCB State Save Area:\n");
4144 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4145 "es:",
4146 save->es.selector, save->es.attrib,
4147 save->es.limit, save->es.base);
4148 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4149 "cs:",
4150 save->cs.selector, save->cs.attrib,
4151 save->cs.limit, save->cs.base);
4152 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4153 "ss:",
4154 save->ss.selector, save->ss.attrib,
4155 save->ss.limit, save->ss.base);
4156 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4157 "ds:",
4158 save->ds.selector, save->ds.attrib,
4159 save->ds.limit, save->ds.base);
4160 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4161 "fs:",
4162 save->fs.selector, save->fs.attrib,
4163 save->fs.limit, save->fs.base);
4164 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4165 "gs:",
4166 save->gs.selector, save->gs.attrib,
4167 save->gs.limit, save->gs.base);
4168 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4169 "gdtr:",
4170 save->gdtr.selector, save->gdtr.attrib,
4171 save->gdtr.limit, save->gdtr.base);
4172 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4173 "ldtr:",
4174 save->ldtr.selector, save->ldtr.attrib,
4175 save->ldtr.limit, save->ldtr.base);
4176 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4177 "idtr:",
4178 save->idtr.selector, save->idtr.attrib,
4179 save->idtr.limit, save->idtr.base);
4180 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4181 "tr:",
4182 save->tr.selector, save->tr.attrib,
4183 save->tr.limit, save->tr.base);
4184 pr_err("cpl: %d efer: %016llx\n",
4185 save->cpl, save->efer);
4186 pr_err("%-15s %016llx %-13s %016llx\n",
4187 "cr0:", save->cr0, "cr2:", save->cr2);
4188 pr_err("%-15s %016llx %-13s %016llx\n",
4189 "cr3:", save->cr3, "cr4:", save->cr4);
4190 pr_err("%-15s %016llx %-13s %016llx\n",
4191 "dr6:", save->dr6, "dr7:", save->dr7);
4192 pr_err("%-15s %016llx %-13s %016llx\n",
4193 "rip:", save->rip, "rflags:", save->rflags);
4194 pr_err("%-15s %016llx %-13s %016llx\n",
4195 "rsp:", save->rsp, "rax:", save->rax);
4196 pr_err("%-15s %016llx %-13s %016llx\n",
4197 "star:", save->star, "lstar:", save->lstar);
4198 pr_err("%-15s %016llx %-13s %016llx\n",
4199 "cstar:", save->cstar, "sfmask:", save->sfmask);
4200 pr_err("%-15s %016llx %-13s %016llx\n",
4201 "kernel_gs_base:", save->kernel_gs_base,
4202 "sysenter_cs:", save->sysenter_cs);
4203 pr_err("%-15s %016llx %-13s %016llx\n",
4204 "sysenter_esp:", save->sysenter_esp,
4205 "sysenter_eip:", save->sysenter_eip);
4206 pr_err("%-15s %016llx %-13s %016llx\n",
4207 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
4208 pr_err("%-15s %016llx %-13s %016llx\n",
4209 "br_from:", save->br_from, "br_to:", save->br_to);
4210 pr_err("%-15s %016llx %-13s %016llx\n",
4211 "excp_from:", save->last_excp_from,
4212 "excp_to:", save->last_excp_to);
4213 }
4214
4215 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
4216 {
4217 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
4218
4219 *info1 = control->exit_info_1;
4220 *info2 = control->exit_info_2;
4221 }
4222
4223 static int handle_exit(struct kvm_vcpu *vcpu)
4224 {
4225 struct vcpu_svm *svm = to_svm(vcpu);
4226 struct kvm_run *kvm_run = vcpu->run;
4227 u32 exit_code = svm->vmcb->control.exit_code;
4228
4229 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
4230
4231 vcpu->arch.gpa_available = (exit_code == SVM_EXIT_NPF);
4232
4233 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
4234 vcpu->arch.cr0 = svm->vmcb->save.cr0;
4235 if (npt_enabled)
4236 vcpu->arch.cr3 = svm->vmcb->save.cr3;
4237
4238 if (unlikely(svm->nested.exit_required)) {
4239 nested_svm_vmexit(svm);
4240 svm->nested.exit_required = false;
4241
4242 return 1;
4243 }
4244
4245 if (is_guest_mode(vcpu)) {
4246 int vmexit;
4247
4248 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
4249 svm->vmcb->control.exit_info_1,
4250 svm->vmcb->control.exit_info_2,
4251 svm->vmcb->control.exit_int_info,
4252 svm->vmcb->control.exit_int_info_err,
4253 KVM_ISA_SVM);
4254
4255 vmexit = nested_svm_exit_special(svm);
4256
4257 if (vmexit == NESTED_EXIT_CONTINUE)
4258 vmexit = nested_svm_exit_handled(svm);
4259
4260 if (vmexit == NESTED_EXIT_DONE)
4261 return 1;
4262 }
4263
4264 svm_complete_interrupts(svm);
4265
4266 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
4267 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4268 kvm_run->fail_entry.hardware_entry_failure_reason
4269 = svm->vmcb->control.exit_code;
4270 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
4271 dump_vmcb(vcpu);
4272 return 0;
4273 }
4274
4275 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
4276 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
4277 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
4278 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
4279 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
4280 "exit_code 0x%x\n",
4281 __func__, svm->vmcb->control.exit_int_info,
4282 exit_code);
4283
4284 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
4285 || !svm_exit_handlers[exit_code]) {
4286 WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
4287 kvm_queue_exception(vcpu, UD_VECTOR);
4288 return 1;
4289 }
4290
4291 return svm_exit_handlers[exit_code](svm);
4292 }
4293
4294 static void reload_tss(struct kvm_vcpu *vcpu)
4295 {
4296 int cpu = raw_smp_processor_id();
4297
4298 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
4299 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
4300 load_TR_desc();
4301 }
4302
4303 static void pre_svm_run(struct vcpu_svm *svm)
4304 {
4305 int cpu = raw_smp_processor_id();
4306
4307 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
4308
4309 /* FIXME: handle wraparound of asid_generation */
4310 if (svm->asid_generation != sd->asid_generation)
4311 new_asid(svm, sd);
4312 }
4313
4314 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
4315 {
4316 struct vcpu_svm *svm = to_svm(vcpu);
4317
4318 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
4319 vcpu->arch.hflags |= HF_NMI_MASK;
4320 set_intercept(svm, INTERCEPT_IRET);
4321 ++vcpu->stat.nmi_injections;
4322 }
4323
4324 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
4325 {
4326 struct vmcb_control_area *control;
4327
4328 /* The following fields are ignored when AVIC is enabled */
4329 control = &svm->vmcb->control;
4330 control->int_vector = irq;
4331 control->int_ctl &= ~V_INTR_PRIO_MASK;
4332 control->int_ctl |= V_IRQ_MASK |
4333 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
4334 mark_dirty(svm->vmcb, VMCB_INTR);
4335 }
4336
4337 static void svm_set_irq(struct kvm_vcpu *vcpu)
4338 {
4339 struct vcpu_svm *svm = to_svm(vcpu);
4340
4341 BUG_ON(!(gif_set(svm)));
4342
4343 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
4344 ++vcpu->stat.irq_injections;
4345
4346 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
4347 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
4348 }
4349
4350 static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
4351 {
4352 return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
4353 }
4354
4355 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
4356 {
4357 struct vcpu_svm *svm = to_svm(vcpu);
4358
4359 if (svm_nested_virtualize_tpr(vcpu) ||
4360 kvm_vcpu_apicv_active(vcpu))
4361 return;
4362
4363 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
4364
4365 if (irr == -1)
4366 return;
4367
4368 if (tpr >= irr)
4369 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
4370 }
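/*
 * Sketch of the intent (annotation), assuming the caller passes 4-bit
 * priority classes (highest pending vector >> 4): with a pending
 * vector 0x51, irr = 5; if the guest's TPR class is also 5, tpr >= irr
 * holds and CR8 writes are intercepted again so KVM notices when the
 * guest lowers its TPR and the interrupt becomes deliverable.
 */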
4371
4372 static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
4373 {
4374 return;
4375 }
4376
4377 static bool svm_get_enable_apicv(void)
4378 {
4379 return avic;
4380 }
4381
4382 static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
4383 {
4384 }
4385
4386 static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
4387 {
4388 }
4389
4390 /* Note: Currently only used by Hyper-V. */
4391 static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4392 {
4393 struct vcpu_svm *svm = to_svm(vcpu);
4394 struct vmcb *vmcb = svm->vmcb;
4395
4396 if (!avic)
4397 return;
4398
4399 vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
4400 mark_dirty(vmcb, VMCB_INTR);
4401 }
4402
4403 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
4404 {
4405 return;
4406 }
4407
4408 static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
4409 {
4410 kvm_lapic_set_irr(vec, vcpu->arch.apic);
4411 smp_mb__after_atomic();
4412
4413 if (avic_vcpu_is_running(vcpu))
4414 wrmsrl(SVM_AVIC_DOORBELL,
4415 kvm_cpu_get_apicid(vcpu->cpu));
4416 else
4417 kvm_vcpu_wake_up(vcpu);
4418 }
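/*
 * Note on ordering (annotation): the smp_mb__after_atomic() above
 * ensures the IRR bit is visible before avic_vcpu_is_running() is
 * checked, so a vcpu observed as not running cannot have missed both
 * the doorbell write and the explicit wakeup.
 */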
4419
4420 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
4421 {
4422 unsigned long flags;
4423 struct amd_svm_iommu_ir *cur;
4424
4425 spin_lock_irqsave(&svm->ir_list_lock, flags);
4426 list_for_each_entry(cur, &svm->ir_list, node) {
4427 if (cur->data != pi->ir_data)
4428 continue;
4429 list_del(&cur->node);
4430 kfree(cur);
4431 break;
4432 }
4433 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
4434 }
4435
4436 static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
4437 {
4438 int ret = 0;
4439 unsigned long flags;
4440 struct amd_svm_iommu_ir *ir;
4441
4442 /**
4443 * In some cases, the existing irte is updated and re-set,
4444 * so we need to check here if it's already been added
4445 * to the ir_list.
4446 */
4447 if (pi->ir_data && (pi->prev_ga_tag != 0)) {
4448 struct kvm *kvm = svm->vcpu.kvm;
4449 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
4450 struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
4451 struct vcpu_svm *prev_svm;
4452
4453 if (!prev_vcpu) {
4454 ret = -EINVAL;
4455 goto out;
4456 }
4457
4458 prev_svm = to_svm(prev_vcpu);
4459 svm_ir_list_del(prev_svm, pi);
4460 }
4461
4462 /**
4463 * Allocate a new amd_svm_iommu_ir entry, which will be
4464 * added to the per-vcpu ir_list.
4465 */
4466 ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL);
4467 if (!ir) {
4468 ret = -ENOMEM;
4469 goto out;
4470 }
4471 ir->data = pi->ir_data;
4472
4473 spin_lock_irqsave(&svm->ir_list_lock, flags);
4474 list_add(&ir->node, &svm->ir_list);
4475 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
4476 out:
4477 return ret;
4478 }
4479
4480 /**
4481 * Note:
4482 * The HW cannot support posting multicast/broadcast
4483 * interrupts to a vCPU. So, we still use legacy interrupt
4484 * remapping for these kinds of interrupts.
4485 *
4486 * For lowest-priority interrupts, we only support
4487 * those with a single CPU as the destination, e.g. the user
4488 * configures the interrupts via /proc/irq or uses
4489 * irqbalance to make the interrupts single-CPU.
4490 */
4491 static int
4492 get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
4493 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
4494 {
4495 struct kvm_lapic_irq irq;
4496 struct kvm_vcpu *vcpu = NULL;
4497
4498 kvm_set_msi_irq(kvm, e, &irq);
4499
4500 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
4501 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
4502 __func__, irq.vector);
4503 return -1;
4504 }
4505
4506 pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
4507 irq.vector);
4508 *svm = to_svm(vcpu);
4509 vcpu_info->pi_desc_addr = page_to_phys((*svm)->avic_backing_page);
4510 vcpu_info->vector = irq.vector;
4511
4512 return 0;
4513 }
4514
4515 /*
4516 * svm_update_pi_irte - set IRTE for Posted-Interrupts
4517 *
4518 * @kvm: kvm
4519 * @host_irq: host irq of the interrupt
4520 * @guest_irq: gsi of the interrupt
4521 * @set: set or unset PI
4522 * returns 0 on success, < 0 on failure
4523 */
4524 static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
4525 uint32_t guest_irq, bool set)
4526 {
4527 struct kvm_kernel_irq_routing_entry *e;
4528 struct kvm_irq_routing_table *irq_rt;
4529 int idx, ret = -EINVAL;
4530
4531 if (!kvm_arch_has_assigned_device(kvm) ||
4532 !irq_remapping_cap(IRQ_POSTING_CAP))
4533 return 0;
4534
4535 pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
4536 __func__, host_irq, guest_irq, set);
4537
4538 idx = srcu_read_lock(&kvm->irq_srcu);
4539 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
4540 WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
4541
4542 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
4543 struct vcpu_data vcpu_info;
4544 struct vcpu_svm *svm = NULL;
4545
4546 if (e->type != KVM_IRQ_ROUTING_MSI)
4547 continue;
4548
4549 /**
4550 * Here, we set up with legacy mode in the following cases:
4551 * 1. When we cannot target the interrupt to a specific vcpu.
4552 * 2. When unsetting the posted interrupt.
4553 * 3. When APIC virtualization is disabled for the vcpu.
4554 */
4555 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
4556 kvm_vcpu_apicv_active(&svm->vcpu)) {
4557 struct amd_iommu_pi_data pi;
4558
4559 /* Try to enable guest_mode in IRTE */
4560 pi.base = page_to_phys(svm->avic_backing_page) & AVIC_HPA_MASK;
4561 pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
4562 svm->vcpu.vcpu_id);
4563 pi.is_guest_mode = true;
4564 pi.vcpu_data = &vcpu_info;
4565 ret = irq_set_vcpu_affinity(host_irq, &pi);
4566
4567 /**
4568 * Here, we have successfully set up vcpu affinity in
4569 * IOMMU guest mode. Now, we need to store the posted
4570 * interrupt information in a per-vcpu ir_list so that
4571 * we can reference it directly when we update the vcpu
4572 * scheduling information in the IOMMU irte.
4573 */
4574 if (!ret && pi.is_guest_mode)
4575 svm_ir_list_add(svm, &pi);
4576 } else {
4577 /* Use legacy mode in IRTE */
4578 struct amd_iommu_pi_data pi;
4579
4580 /**
4581 * Here, pi is used to:
4582 * - Tell the IOMMU to use legacy mode for this interrupt.
4583 * - Retrieve the ga_tag of the prior interrupt remapping data.
4584 */
4585 pi.is_guest_mode = false;
4586 ret = irq_set_vcpu_affinity(host_irq, &pi);
4587
4588 /**
4589 * Check if the posted interrupt was previously
4590 * set up in guest_mode by checking if the ga_tag
4591 * was cached. If so, we need to clean up the per-vcpu
4592 * ir_list.
4593 */
4594 if (!ret && pi.prev_ga_tag) {
4595 int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
4596 struct kvm_vcpu *vcpu;
4597
4598 vcpu = kvm_get_vcpu_by_id(kvm, id);
4599 if (vcpu)
4600 svm_ir_list_del(to_svm(vcpu), &pi);
4601 }
4602 }
4603
4604 if (!ret && svm) {
4605 trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
4606 host_irq, e->gsi,
4607 vcpu_info.vector,
4608 vcpu_info.pi_desc_addr, set);
4609 }
4610
4611 if (ret < 0) {
4612 pr_err("%s: failed to update PI IRTE\n", __func__);
4613 goto out;
4614 }
4615 }
4616
4617 ret = 0;
4618 out:
4619 srcu_read_unlock(&kvm->irq_srcu, idx);
4620 return ret;
4621 }
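/*
 * Illustrative GA tag arithmetic used above (annotation): AVIC_GATAG()
 * packs the VM ID above the 8 vcpu ID bits, so vm_id = 0x5 and
 * vcpu_id = 0x3 produce ga_tag = 0x503, and AVIC_GATAG_TO_VCPUID(0x503)
 * recovers vcpu_id = 3 for the prev_ga_tag cleanup path.
 */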
4622
4623 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
4624 {
4625 struct vcpu_svm *svm = to_svm(vcpu);
4626 struct vmcb *vmcb = svm->vmcb;
4627 int ret;
4628 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
4629 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
4630 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
4631
4632 return ret;
4633 }
4634
4635 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
4636 {
4637 struct vcpu_svm *svm = to_svm(vcpu);
4638
4639 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
4640 }
4641
4642 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
4643 {
4644 struct vcpu_svm *svm = to_svm(vcpu);
4645
4646 if (masked) {
4647 svm->vcpu.arch.hflags |= HF_NMI_MASK;
4648 set_intercept(svm, INTERCEPT_IRET);
4649 } else {
4650 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
4651 clr_intercept(svm, INTERCEPT_IRET);
4652 }
4653 }
4654
4655 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
4656 {
4657 struct vcpu_svm *svm = to_svm(vcpu);
4658 struct vmcb *vmcb = svm->vmcb;
4659 int ret;
4660
4661 if (!gif_set(svm) ||
4662 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
4663 return 0;
4664
4665 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
4666
4667 if (is_guest_mode(vcpu))
4668 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
4669
4670 return ret;
4671 }
4672
4673 static void enable_irq_window(struct kvm_vcpu *vcpu)
4674 {
4675 struct vcpu_svm *svm = to_svm(vcpu);
4676
4677 if (kvm_vcpu_apicv_active(vcpu))
4678 return;
4679
4680 /*
4681 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
4682 * 1, because that's a separate STGI/VMRUN intercept. The next time we
4683 * get that intercept, this function will be called again though and
4684 * we'll get the vintr intercept.
4685 */
4686 if (gif_set(svm) && nested_svm_intr(svm)) {
4687 svm_set_vintr(svm);
4688 svm_inject_irq(svm, 0x0);
4689 }
4690 }
4691
4692 static void enable_nmi_window(struct kvm_vcpu *vcpu)
4693 {
4694 struct vcpu_svm *svm = to_svm(vcpu);
4695
4696 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
4697 == HF_NMI_MASK)
4698 return; /* IRET will cause a vm exit */
4699
4700 if ((svm->vcpu.arch.hflags & HF_GIF_MASK) == 0)
4701 return; /* STGI will cause a vm exit */
4702
4703 if (svm->nested.exit_required)
4704 return; /* we're not going to run the guest yet */
4705
4706 /*
4707 * Something is preventing the NMI from being injected. Single-step over
4708 * the possible problem (IRET, exception injection, or interrupt shadow).
4709 */
4710 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
4711 svm->nmi_singlestep = true;
4712 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
4713 }
4714
4715 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
4716 {
4717 return 0;
4718 }
4719
4720 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
4721 {
4722 struct vcpu_svm *svm = to_svm(vcpu);
4723
4724 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
4725 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
4726 else
4727 svm->asid_generation--;
4728 }
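/*
 * Design note (annotation): decrementing asid_generation is a cheap TLB
 * flush on CPUs without FLUSHBYASID; the next pre_svm_run() sees the
 * per-cpu generation mismatch and calls new_asid(), so the guest
 * resumes on a fresh ASID with no stale translations.
 */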
4729
4730 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
4731 {
4732 }
4733
4734 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
4735 {
4736 struct vcpu_svm *svm = to_svm(vcpu);
4737
4738 if (svm_nested_virtualize_tpr(vcpu))
4739 return;
4740
4741 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
4742 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
4743 kvm_set_cr8(vcpu, cr8);
4744 }
4745 }
4746
4747 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
4748 {
4749 struct vcpu_svm *svm = to_svm(vcpu);
4750 u64 cr8;
4751
4752 if (svm_nested_virtualize_tpr(vcpu) ||
4753 kvm_vcpu_apicv_active(vcpu))
4754 return;
4755
4756 cr8 = kvm_get_cr8(vcpu);
4757 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
4758 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
4759 }
4760
4761 static void svm_complete_interrupts(struct vcpu_svm *svm)
4762 {
4763 u8 vector;
4764 int type;
4765 u32 exitintinfo = svm->vmcb->control.exit_int_info;
4766 unsigned int3_injected = svm->int3_injected;
4767
4768 svm->int3_injected = 0;
4769
4770 /*
4771 * If we've made progress since setting HF_IRET_MASK, we've
4772 * executed an IRET and can allow NMI injection.
4773 */
4774 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
4775 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
4776 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
4777 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
4778 }
4779
4780 svm->vcpu.arch.nmi_injected = false;
4781 kvm_clear_exception_queue(&svm->vcpu);
4782 kvm_clear_interrupt_queue(&svm->vcpu);
4783
4784 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
4785 return;
4786
4787 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
4788
4789 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
4790 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
4791
4792 switch (type) {
4793 case SVM_EXITINTINFO_TYPE_NMI:
4794 svm->vcpu.arch.nmi_injected = true;
4795 break;
4796 case SVM_EXITINTINFO_TYPE_EXEPT:
4797 /*
4798 * In case of software exceptions, do not reinject the vector,
4799 * but re-execute the instruction instead. Rewind RIP first
4800 * if we emulated INT3 before.
4801 */
4802 if (kvm_exception_is_soft(vector)) {
4803 if (vector == BP_VECTOR && int3_injected &&
4804 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
4805 kvm_rip_write(&svm->vcpu,
4806 kvm_rip_read(&svm->vcpu) -
4807 int3_injected);
4808 break;
4809 }
4810 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
4811 u32 err = svm->vmcb->control.exit_int_info_err;
4812 kvm_requeue_exception_e(&svm->vcpu, vector, err);
4813
4814 } else
4815 kvm_requeue_exception(&svm->vcpu, vector);
4816 break;
4817 case SVM_EXITINTINFO_TYPE_INTR:
4818 kvm_queue_interrupt(&svm->vcpu, vector, false);
4819 break;
4820 default:
4821 break;
4822 }
4823 }
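/*
 * Illustrative decode (annotation), assuming the usual EXITINTINFO
 * layout (vector in bits 7:0, type in bits 10:8, valid in bit 31):
 * exitintinfo = 0x8000030e is a valid exception record
 * (type = SVM_EXITINTINFO_TYPE_EXEPT) with vector 0x0e (#PF), which
 * the switch above requeues via kvm_requeue_exception().
 */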
4824
4825 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
4826 {
4827 struct vcpu_svm *svm = to_svm(vcpu);
4828 struct vmcb_control_area *control = &svm->vmcb->control;
4829
4830 control->exit_int_info = control->event_inj;
4831 control->exit_int_info_err = control->event_inj_err;
4832 control->event_inj = 0;
4833 svm_complete_interrupts(svm);
4834 }
4835
4836 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
4837 {
4838 struct vcpu_svm *svm = to_svm(vcpu);
4839
4840 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4841 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4842 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4843
4844 /*
4845 * A vmexit emulation is required before the vcpu can be executed
4846 * again.
4847 */
4848 if (unlikely(svm->nested.exit_required))
4849 return;
4850
4851 /*
4852 * Disable singlestep if we're injecting an interrupt/exception.
4853 * We don't want our modified rflags to be pushed on the stack where
4854 * we might not be able to easily reset them if we disabled NMI
4855 * singlestep later.
4856 */
4857 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
4858 /*
4859 * Event injection happens before external interrupts cause a
4860 * vmexit and interrupts are disabled here, so smp_send_reschedule
4861 * is enough to force an immediate vmexit.
4862 */
4863 disable_nmi_singlestep(svm);
4864 smp_send_reschedule(vcpu->cpu);
4865 }
4866
4867 pre_svm_run(svm);
4868
4869 sync_lapic_to_cr8(vcpu);
4870
4871 svm->vmcb->save.cr2 = vcpu->arch.cr2;
4872
4873 clgi();
4874
4875 local_irq_enable();
4876
4877 asm volatile (
4878 "push %%" _ASM_BP "; \n\t"
4879 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
4880 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
4881 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
4882 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
4883 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
4884 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
4885 #ifdef CONFIG_X86_64
4886 "mov %c[r8](%[svm]), %%r8 \n\t"
4887 "mov %c[r9](%[svm]), %%r9 \n\t"
4888 "mov %c[r10](%[svm]), %%r10 \n\t"
4889 "mov %c[r11](%[svm]), %%r11 \n\t"
4890 "mov %c[r12](%[svm]), %%r12 \n\t"
4891 "mov %c[r13](%[svm]), %%r13 \n\t"
4892 "mov %c[r14](%[svm]), %%r14 \n\t"
4893 "mov %c[r15](%[svm]), %%r15 \n\t"
4894 #endif
4895
4896 /* Enter guest mode */
4897 "push %%" _ASM_AX " \n\t"
4898 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
4899 __ex(SVM_VMLOAD) "\n\t"
4900 __ex(SVM_VMRUN) "\n\t"
4901 __ex(SVM_VMSAVE) "\n\t"
4902 "pop %%" _ASM_AX " \n\t"
4903
4904 /* Save guest registers, load host registers */
4905 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
4906 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
4907 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
4908 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
4909 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
4910 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
4911 #ifdef CONFIG_X86_64
4912 "mov %%r8, %c[r8](%[svm]) \n\t"
4913 "mov %%r9, %c[r9](%[svm]) \n\t"
4914 "mov %%r10, %c[r10](%[svm]) \n\t"
4915 "mov %%r11, %c[r11](%[svm]) \n\t"
4916 "mov %%r12, %c[r12](%[svm]) \n\t"
4917 "mov %%r13, %c[r13](%[svm]) \n\t"
4918 "mov %%r14, %c[r14](%[svm]) \n\t"
4919 "mov %%r15, %c[r15](%[svm]) \n\t"
4920 #endif
4921 "pop %%" _ASM_BP
4922 :
4923 : [svm]"a"(svm),
4924 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
4925 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
4926 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
4927 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
4928 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
4929 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
4930 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
4931 #ifdef CONFIG_X86_64
4932 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
4933 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
4934 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
4935 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
4936 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
4937 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
4938 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
4939 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
4940 #endif
4941 : "cc", "memory"
4942 #ifdef CONFIG_X86_64
4943 , "rbx", "rcx", "rdx", "rsi", "rdi"
4944 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
4945 #else
4946 , "ebx", "ecx", "edx", "esi", "edi"
4947 #endif
4948 );
4949
4950 #ifdef CONFIG_X86_64
4951 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
4952 #else
4953 loadsegment(fs, svm->host.fs);
4954 #ifndef CONFIG_X86_32_LAZY_GS
4955 loadsegment(gs, svm->host.gs);
4956 #endif
4957 #endif
4958
4959 reload_tss(vcpu);
4960
4961 local_irq_disable();
4962
4963 vcpu->arch.cr2 = svm->vmcb->save.cr2;
4964 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
4965 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
4966 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
4967
4968 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4969 kvm_before_handle_nmi(&svm->vcpu);
4970
4971 stgi();
4972
4973 /* Any pending NMI will happen here */
4974
4975 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4976 kvm_after_handle_nmi(&svm->vcpu);
4977
4978 sync_cr8_to_lapic(vcpu);
4979
4980 svm->next_rip = 0;
4981
4982 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
4983
4984 /* if exit due to PF check for async PF */
4985 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
4986 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
4987
4988 if (npt_enabled) {
4989 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
4990 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
4991 }
4992
4993 /*
4994 * We need to handle MC intercepts here before the vcpu has a chance to
4995 * change the physical cpu.
4996 */
4997 if (unlikely(svm->vmcb->control.exit_code ==
4998 SVM_EXIT_EXCP_BASE + MC_VECTOR))
4999 svm_handle_mce(svm);
5000
5001 mark_all_clean(svm->vmcb);
5002 }
5003 STACK_FRAME_NON_STANDARD(svm_vcpu_run);
5004
5005 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5006 {
5007 struct vcpu_svm *svm = to_svm(vcpu);
5008
5009 svm->vmcb->save.cr3 = root;
5010 mark_dirty(svm->vmcb, VMCB_CR);
5011 svm_flush_tlb(vcpu);
5012 }
5013
5014 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5015 {
5016 struct vcpu_svm *svm = to_svm(vcpu);
5017
5018 svm->vmcb->control.nested_cr3 = root;
5019 mark_dirty(svm->vmcb, VMCB_NPT);
5020
5021 /* Also sync guest cr3 here in case we live migrate */
5022 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
5023 mark_dirty(svm->vmcb, VMCB_CR);
5024
5025 svm_flush_tlb(vcpu);
5026 }
5027
5028 static int is_disabled(void)
5029 {
5030 u64 vm_cr;
5031
5032 rdmsrl(MSR_VM_CR, vm_cr);
5033 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
5034 return 1;
5035
5036 return 0;
5037 }
5038
5039 static void
5040 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5041 {
5042 /*
5043 * Patch in the VMMCALL instruction:
5044 */
5045 hypercall[0] = 0x0f;
5046 hypercall[1] = 0x01;
5047 hypercall[2] = 0xd9;
5048 }
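/*
 * For reference (annotation): 0f 01 d9 is the VMMCALL encoding.
 * Generic guest code issues VMCALL (0f 01 c1), which faults on AMD
 * hardware; the fault path lets KVM rewrite the instruction to
 * VMMCALL via this hook.
 */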
5049
5050 static void svm_check_processor_compat(void *rtn)
5051 {
5052 *(int *)rtn = 0;
5053 }
5054
5055 static bool svm_cpu_has_accelerated_tpr(void)
5056 {
5057 return false;
5058 }
5059
5060 static bool svm_has_high_real_mode_segbase(void)
5061 {
5062 return true;
5063 }
5064
5065 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
5066 {
5067 return 0;
5068 }
5069
5070 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
5071 {
5072 struct vcpu_svm *svm = to_svm(vcpu);
5073 struct kvm_cpuid_entry2 *entry;
5074
5075 /* Update nrips enabled cache */
5076 svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
5077
5078 if (!kvm_vcpu_apicv_active(vcpu))
5079 return;
5080
5081 entry = kvm_find_cpuid_entry(vcpu, 1, 0);
5082 if (entry)
5083 entry->ecx &= ~bit(X86_FEATURE_X2APIC);
5084 }
5085
5086 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
5087 {
5088 switch (func) {
5089 case 0x1:
5090 if (avic)
5091 entry->ecx &= ~bit(X86_FEATURE_X2APIC);
5092 break;
5093 case 0x80000001:
5094 if (nested)
5095 entry->ecx |= (1 << 2); /* Set SVM bit */
5096 break;
5097 case 0x8000000A:
5098 entry->eax = 1; /* SVM revision 1 */
5099 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
5100 ASID emulation to nested SVM */
5101 entry->ecx = 0; /* Reserved */
5102 entry->edx = 0; /* By default, do not support any
5103 additional features */
5104
5105 /* Support next_rip if host supports it */
5106 if (boot_cpu_has(X86_FEATURE_NRIPS))
5107 entry->edx |= SVM_FEATURE_NRIP;
5108
5109 /* Support NPT for the guest if enabled */
5110 if (npt_enabled)
5111 entry->edx |= SVM_FEATURE_NPT;
5112
5113 break;
5114 }
5115 }
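/*
 * Worked example for leaf 0x8000000A above (annotation): on a host
 * with NRIPS and NPT enabled, the guest sees eax = 1, ebx = 8 and
 * edx = SVM_FEATURE_NRIP | SVM_FEATURE_NPT = (1 << 3) | (1 << 0) = 0x9.
 */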
5116
5117 static int svm_get_lpage_level(void)
5118 {
5119 return PT_PDPE_LEVEL;
5120 }
5121
5122 static bool svm_rdtscp_supported(void)
5123 {
5124 return boot_cpu_has(X86_FEATURE_RDTSCP);
5125 }
5126
5127 static bool svm_invpcid_supported(void)
5128 {
5129 return false;
5130 }
5131
5132 static bool svm_mpx_supported(void)
5133 {
5134 return false;
5135 }
5136
5137 static bool svm_xsaves_supported(void)
5138 {
5139 return false;
5140 }
5141
5142 static bool svm_has_wbinvd_exit(void)
5143 {
5144 return true;
5145 }
5146
5147 #define PRE_EX(exit) { .exit_code = (exit), \
5148 .stage = X86_ICPT_PRE_EXCEPT, }
5149 #define POST_EX(exit) { .exit_code = (exit), \
5150 .stage = X86_ICPT_POST_EXCEPT, }
5151 #define POST_MEM(exit) { .exit_code = (exit), \
5152 .stage = X86_ICPT_POST_MEMACCESS, }
5153
5154 static const struct __x86_intercept {
5155 u32 exit_code;
5156 enum x86_intercept_stage stage;
5157 } x86_intercept_map[] = {
5158 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
5159 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
5160 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
5161 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
5162 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
5163 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
5164 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
5165 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
5166 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
5167 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
5168 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
5169 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
5170 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
5171 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
5172 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
5173 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
5174 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
5175 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
5176 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
5177 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
5178 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
5179 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
5180 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
5181 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
5182 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
5183 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
5184 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
5185 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
5186 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
5187 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
5188 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
5189 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
5190 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
5191 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
5192 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
5193 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
5194 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
5195 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
5196 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
5197 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
5198 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
5199 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
5200 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
5201 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
5202 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
5203 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
5204 };
5205
5206 #undef PRE_EX
5207 #undef POST_EX
5208 #undef POST_MEM
5209
5210 static int svm_check_intercept(struct kvm_vcpu *vcpu,
5211 struct x86_instruction_info *info,
5212 enum x86_intercept_stage stage)
5213 {
5214 struct vcpu_svm *svm = to_svm(vcpu);
5215 int vmexit, ret = X86EMUL_CONTINUE;
5216 struct __x86_intercept icpt_info;
5217 struct vmcb *vmcb = svm->vmcb;
5218
5219 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
5220 goto out;
5221
5222 icpt_info = x86_intercept_map[info->intercept];
5223
5224 if (stage != icpt_info.stage)
5225 goto out;
5226
5227 switch (icpt_info.exit_code) {
5228 case SVM_EXIT_READ_CR0:
5229 if (info->intercept == x86_intercept_cr_read)
5230 icpt_info.exit_code += info->modrm_reg;
5231 break;
5232 case SVM_EXIT_WRITE_CR0: {
5233 unsigned long cr0, val;
5234 u64 intercept;
5235
5236 if (info->intercept == x86_intercept_cr_write)
5237 icpt_info.exit_code += info->modrm_reg;
5238
5239 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
5240 info->intercept == x86_intercept_clts)
5241 break;
5242
5243 intercept = svm->nested.intercept;
5244
5245 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
5246 break;
5247
5248 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
5249 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
5250
5251 if (info->intercept == x86_intercept_lmsw) {
5252 cr0 &= 0xfUL;
5253 val &= 0xfUL;
5254 /* lmsw can't clear PE - catch this here */
5255 if (cr0 & X86_CR0_PE)
5256 val |= X86_CR0_PE;
5257 }
5258
5259 if (cr0 ^ val)
5260 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
5261
5262 break;
5263 }
5264 case SVM_EXIT_READ_DR0:
5265 case SVM_EXIT_WRITE_DR0:
5266 icpt_info.exit_code += info->modrm_reg;
5267 break;
5268 case SVM_EXIT_MSR:
5269 if (info->intercept == x86_intercept_wrmsr)
5270 vmcb->control.exit_info_1 = 1;
5271 else
5272 vmcb->control.exit_info_1 = 0;
5273 break;
5274 case SVM_EXIT_PAUSE:
5275 /*
5276 * We only get this for NOP, but a real PAUSE is 'rep nop';
5277 * check for the REP prefix here.
5278 */
5279 if (info->rep_prefix != REPE_PREFIX)
5280 goto out;
break; /* do not fall through into the IOIO handling below */
5281 case SVM_EXIT_IOIO: {
5282 u64 exit_info;
5283 u32 bytes;
5284
5285 if (info->intercept == x86_intercept_in ||
5286 info->intercept == x86_intercept_ins) {
5287 exit_info = ((info->src_val & 0xffff) << 16) |
5288 SVM_IOIO_TYPE_MASK;
5289 bytes = info->dst_bytes;
5290 } else {
5291 exit_info = (info->dst_val & 0xffff) << 16;
5292 bytes = info->src_bytes;
5293 }
5294
5295 if (info->intercept == x86_intercept_outs ||
5296 info->intercept == x86_intercept_ins)
5297 exit_info |= SVM_IOIO_STR_MASK;
5298
5299 if (info->rep_prefix)
5300 exit_info |= SVM_IOIO_REP_MASK;
5301
5302 bytes = min(bytes, 4u);
5303
5304 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
5305
5306 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
5307
5308 vmcb->control.exit_info_1 = exit_info;
5309 vmcb->control.exit_info_2 = info->next_rip;
5310
5311 break;
5312 }
5313 default:
5314 break;
5315 }
5316
5317 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
5318 if (static_cpu_has(X86_FEATURE_NRIPS))
5319 vmcb->control.next_rip = info->next_rip;
5320 vmcb->control.exit_code = icpt_info.exit_code;
5321 vmexit = nested_svm_exit_handled(svm);
5322
5323 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
5324 : X86EMUL_CONTINUE;
5325
5326 out:
5327 return ret;
5328 }
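/*
 * Illustrative IOIO encoding from the case above (annotation),
 * assuming the usual field layout (port in bits 31:16, size at
 * SVM_IOIO_SIZE_SHIFT = 4, address size at SVM_IOIO_ASIZE_SHIFT = 7,
 * IN in bit 0): emulating a one-byte 'in al, 0x60' with 2-byte
 * addressing yields exit_info_1 = (0x60 << 16) | SVM_IOIO_TYPE_MASK |
 * (1 << 4) | (2 << 6) = 0x00600091.
 */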
5329
5330 static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
5331 {
5332 local_irq_enable();
5333 /*
5334 * We must have an instruction with interrupts enabled, so
5335 * the timer interrupt isn't delayed by the interrupt shadow.
5336 */
5337 asm("nop");
5338 local_irq_disable();
5339 }
5340
5341 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
5342 {
5343 }
5344
5345 static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
5346 {
5347 if (avic_handle_apic_id_update(vcpu) != 0)
5348 return;
5349 if (avic_handle_dfr_update(vcpu) != 0)
5350 return;
5351 avic_handle_ldr_update(vcpu);
5352 }
5353
5354 static void svm_setup_mce(struct kvm_vcpu *vcpu)
5355 {
5356 /* [63:9] are reserved. */
5357 vcpu->arch.mcg_cap &= 0x1ff;
5358 }
5359
5360 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
5361 .cpu_has_kvm_support = has_svm,
5362 .disabled_by_bios = is_disabled,
5363 .hardware_setup = svm_hardware_setup,
5364 .hardware_unsetup = svm_hardware_unsetup,
5365 .check_processor_compatibility = svm_check_processor_compat,
5366 .hardware_enable = svm_hardware_enable,
5367 .hardware_disable = svm_hardware_disable,
5368 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
5369 .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
5370
5371 .vcpu_create = svm_create_vcpu,
5372 .vcpu_free = svm_free_vcpu,
5373 .vcpu_reset = svm_vcpu_reset,
5374
5375 .vm_init = avic_vm_init,
5376 .vm_destroy = avic_vm_destroy,
5377
5378 .prepare_guest_switch = svm_prepare_guest_switch,
5379 .vcpu_load = svm_vcpu_load,
5380 .vcpu_put = svm_vcpu_put,
5381 .vcpu_blocking = svm_vcpu_blocking,
5382 .vcpu_unblocking = svm_vcpu_unblocking,
5383
5384 .update_bp_intercept = update_bp_intercept,
5385 .get_msr = svm_get_msr,
5386 .set_msr = svm_set_msr,
5387 .get_segment_base = svm_get_segment_base,
5388 .get_segment = svm_get_segment,
5389 .set_segment = svm_set_segment,
5390 .get_cpl = svm_get_cpl,
5391 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
5392 .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
5393 .decache_cr3 = svm_decache_cr3,
5394 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
5395 .set_cr0 = svm_set_cr0,
5396 .set_cr3 = svm_set_cr3,
5397 .set_cr4 = svm_set_cr4,
5398 .set_efer = svm_set_efer,
5399 .get_idt = svm_get_idt,
5400 .set_idt = svm_set_idt,
5401 .get_gdt = svm_get_gdt,
5402 .set_gdt = svm_set_gdt,
5403 .get_dr6 = svm_get_dr6,
5404 .set_dr6 = svm_set_dr6,
5405 .set_dr7 = svm_set_dr7,
5406 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
5407 .cache_reg = svm_cache_reg,
5408 .get_rflags = svm_get_rflags,
5409 .set_rflags = svm_set_rflags,
5410
5411 .tlb_flush = svm_flush_tlb,
5412
5413 .run = svm_vcpu_run,
5414 .handle_exit = handle_exit,
5415 .skip_emulated_instruction = skip_emulated_instruction,
5416 .set_interrupt_shadow = svm_set_interrupt_shadow,
5417 .get_interrupt_shadow = svm_get_interrupt_shadow,
5418 .patch_hypercall = svm_patch_hypercall,
5419 .set_irq = svm_set_irq,
5420 .set_nmi = svm_inject_nmi,
5421 .queue_exception = svm_queue_exception,
5422 .cancel_injection = svm_cancel_injection,
5423 .interrupt_allowed = svm_interrupt_allowed,
5424 .nmi_allowed = svm_nmi_allowed,
5425 .get_nmi_mask = svm_get_nmi_mask,
5426 .set_nmi_mask = svm_set_nmi_mask,
5427 .enable_nmi_window = enable_nmi_window,
5428 .enable_irq_window = enable_irq_window,
5429 .update_cr8_intercept = update_cr8_intercept,
5430 .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
5431 .get_enable_apicv = svm_get_enable_apicv,
5432 .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
5433 .load_eoi_exitmap = svm_load_eoi_exitmap,
5434 .hwapic_irr_update = svm_hwapic_irr_update,
5435 .hwapic_isr_update = svm_hwapic_isr_update,
5436 .apicv_post_state_restore = avic_post_state_restore,
5437
5438 .set_tss_addr = svm_set_tss_addr,
5439 .get_tdp_level = get_npt_level,
5440 .get_mt_mask = svm_get_mt_mask,
5441
5442 .get_exit_info = svm_get_exit_info,
5443
5444 .get_lpage_level = svm_get_lpage_level,
5445
5446 .cpuid_update = svm_cpuid_update,
5447
5448 .rdtscp_supported = svm_rdtscp_supported,
5449 .invpcid_supported = svm_invpcid_supported,
5450 .mpx_supported = svm_mpx_supported,
5451 .xsaves_supported = svm_xsaves_supported,
5452
5453 .set_supported_cpuid = svm_set_supported_cpuid,
5454
5455 .has_wbinvd_exit = svm_has_wbinvd_exit,
5456
5457 .write_tsc_offset = svm_write_tsc_offset,
5458
5459 .set_tdp_cr3 = set_tdp_cr3,
5460
5461 .check_intercept = svm_check_intercept,
5462 .handle_external_intr = svm_handle_external_intr,
5463
5464 .sched_in = svm_sched_in,
5465
5466 .pmu_ops = &amd_pmu_ops,
5467 .deliver_posted_interrupt = svm_deliver_avic_intr,
5468 .update_pi_irte = svm_update_pi_irte,
5469 .setup_mce = svm_setup_mce,
5470 };
5471
5472 static int __init svm_init(void)
5473 {
5474 return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
5475 __alignof__(struct vcpu_svm), THIS_MODULE);
5476 }
5477
5478 static void __exit svm_exit(void)
5479 {
5480 kvm_exit();
5481 }
5482
5483 module_init(svm_init)
5484 module_exit(svm_exit)