/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>

#include <asm/desc.h>

#include <asm/virtext.h>

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

/* Turn on to get debugging output */
/* #define NESTED_DEBUG */

#ifdef NESTED_DEBUG
#define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args)
#else
#define nsvm_printk(fmt, args...) do {} while (0)
#endif

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	u64 host_gs_base;

	u32 *msrpm;
	struct vmcb *hsave;
	u64 hsave_msr;

	u64 nested_vmcb;

	/* These are the merged vectors */
	u32 *nested_msrpm;

	/* gpa pointers to the real vectors */
	u64 nested_vmcb_msrpm;
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled = false;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static int nested = 0;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);

static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
			     void *arg2, void *opaque);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline bool is_nested(struct vcpu_svm *svm)
{
	return svm->nested_vmcb;
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

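/*
 * Note: the MSR permission map covers three MSR ranges (legacy 0x0...,
 * 0xc0000000... and 0xc0010000...), each taking MSRS_RANGE_SIZE (2048)
 * bytes of the bitmap. Two bits are used per MSR -- one to intercept
 * reads, one to intercept writes -- so each range covers
 * 2048 * 8 / 2 = 8192 MSRs, which is what MSRS_IN_RANGE expresses.
 */
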
#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}

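/*
 * Note: CLGI clears the global interrupt flag (GIF), which masks
 * physical interrupts, NMIs and SMIs on this CPU until STGI sets it
 * again; INVLPGA flushes the TLB entry of a single guest virtual
 * address tagged with the given ASID.
 */
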
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

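/*
 * Note: hardware TLB entries are tagged with the ASID they were
 * created under. Decrementing the vcpu's asid_generation makes it
 * mismatch the per-CPU generation counter, so new_asid() assigns a
 * fresh ASID on the next VMRUN -- a guest TLB flush that leaves host
 * translations untouched.
 */
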
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	vcpu->arch.shadow_efer = efer;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* If we are within a nested VM we'd better #VMEXIT and let the
	   guest handle the exception */
	if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

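/*
 * Note: event_inj uses the EVENTINJ encoding from the AMD APM: the
 * vector sits in the low bits, SVM_EVTINJ_TYPE_EXEPT selects exception
 * delivery, SVM_EVTINJ_VALID_ERR indicates an error code should be
 * pushed (taken from event_inj_err), and the event is delivered by
 * hardware on the next VMRUN.
 */
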
static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
	return ret & mask;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

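/*
 * Note: the VMCB keeps only a single interrupt-shadow bit, so shadows
 * raised by STI and by MOV SS cannot be distinguished on SVM; the
 * getter above reports both flags whenever the bit is set, and the
 * setter collapses any non-zero mask into that one bit.
 */
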
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, vcpu->run, 0, 0, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	cpu_svm_disable();
}

static void svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *svm_data;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

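/*
 * Note: per the AMD APM, running guests requires EFER.SVME to be set
 * and MSR_VM_HSAVE_PA to point at a page of memory where the CPU
 * stashes host state across VMRUN/#VMEXIT; svm_hardware_enable() above
 * does both for each CPU.
 */
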
static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (!svm_data)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(svm_data->save_area);
	kfree(svm_data);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return;
		}
	}
	BUG();
}

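/*
 * Worked example for set_msr_interception(): MSR_LSTAR (0xc0000082)
 * falls in range i = 1, so
 *	msr_offset = (1 * 8192 + 0x82) * 2 = 16644 bits,
 * landing in msrpm[16644 / 32] = msrpm[520] at bit offset
 * 16644 % 32 = 4; bit 4 intercepts reads, bit 5 intercepts writes.
 */
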
static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME);
	}

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	if (!svm_has(SVM_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	control->intercept_cr_read = INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_cr_write = INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK |
					INTERCEPT_CR8_MASK;

	control->intercept_dr_read = INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK;

	control->intercept_dr_write = INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK |
					INTERCEPT_DR5_MASK |
					INTERCEPT_DR7_MASK;

	control->intercept_exceptions = (1 << PF_VECTOR) |
					(1 << UD_VECTOR) |
					(1 << MC_VECTOR);

	control->intercept =	(1ULL << INTERCEPT_INTR) |
				(1ULL << INTERCEPT_NMI) |
				(1ULL << INTERCEPT_SMI) |
				(1ULL << INTERCEPT_CPUID) |
				(1ULL << INTERCEPT_INVD) |
				(1ULL << INTERCEPT_HLT) |
				(1ULL << INTERCEPT_INVLPG) |
				(1ULL << INTERCEPT_INVLPGA) |
				(1ULL << INTERCEPT_IOIO_PROT) |
				(1ULL << INTERCEPT_MSR_PROT) |
				(1ULL << INTERCEPT_TASK_SWITCH) |
				(1ULL << INTERCEPT_SHUTDOWN) |
				(1ULL << INTERCEPT_VMRUN) |
				(1ULL << INTERCEPT_VMMCALL) |
				(1ULL << INTERCEPT_VMLOAD) |
				(1ULL << INTERCEPT_VMSAVE) |
				(1ULL << INTERCEPT_STGI) |
				(1ULL << INTERCEPT_CLGI) |
				(1ULL << INTERCEPT_SKINIT) |
				(1ULL << INTERCEPT_WBINVD) |
				(1ULL << INTERCEPT_MONITOR) |
				(1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = EFER_SVME;
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * cr0 val on cpu init should be 0x60000010, we enable cpu
	 * cache by default. the orderly way is to enable cache in bios.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
					(1ULL << INTERCEPT_INVLPG));
		control->intercept_exceptions &= ~(1 << PF_VECTOR);
		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
						INTERCEPT_CR3_MASK);
		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
						 INTERCEPT_CR3_MASK);
		save->g_pat = 0x0007040600070406ULL;
		/* enable caching because the QEMU Bios doesn't enable it */
		save->cr0 = X86_CR0_ET;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	force_new_asid(&svm->vcpu);

	svm->nested_vmcb = 0;
	svm->vcpu.arch.hflags = HF_GIF_MASK;
}

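/*
 * Note on the nested-paging branch above: with NPT the guest manages
 * its own page tables, so the CR0/CR3 read/write intercepts, INVLPG
 * and #PF interception needed for shadow paging are cleared again, and
 * g_pat is initialized because the guest PAT register becomes active.
 */
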
static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm);

	if (!kvm_vcpu_is_bsp(vcpu)) {
		kvm_rip_write(vcpu, 0);
		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
	}
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto uninit;
	}

	err = -ENOMEM;
	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto uninit;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto uninit;

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	hsave_page = alloc_page(GFP_KERNEL);
	if (!hsave_page)
		goto uninit;
	svm->hsave = page_address(hsave_page);

	svm->nested_msrpm = page_address(nested_msrpm_pages);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	init_vmcb(svm);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (kvm_vcpu_is_bsp(&svm->vcpu))
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->hsave));
	__free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

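/*
 * Note: ASIDs are local to a physical CPU, so svm_vcpu_load() below
 * zeroes asid_generation on migration to force allocation of a fresh
 * ASID on the new CPU, and it adjusts tsc_offset so that the
 * guest-visible TSC keeps increasing monotonically across the move.
 */
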
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->arch.host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
		kvm_migrate_timers(vcpu);
		svm->asid_generation = 0;
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->arch.host_tsc);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		BUG_ON(!npt_enabled);
		load_pdptrs(vcpu, vcpu->arch.cr3);
		break;
	default:
		BUG();
	}
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate
	 * it for cross-vendor migration purposes by "not present".
	 */
	var->unusable = !var->present || (var->type == 0);

	switch (seg) {
	case VCPU_SREG_CS:
		/*
		 * SVM always stores 0 for the 'G' bit in the CS selector in
		 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
		 * Intel's VMENTRY has a check on the 'G' bit.
		 */
		var->g = s->limit > 0xfffff;
		break;
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed.
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache: although it can be cleared in the
		 * descriptor itself, the cached bit always remains 1.
		 * Since Intel checks this on VM entry, set it here to
		 * support cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		break;
	}
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	if (npt_enabled)
		goto set;

	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->arch.cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	if (!vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		cr0 |= X86_CR0_TS;
	}
set:
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		force_new_asid(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

static void update_db_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.intercept_exceptions &=
		~((1 << DB_VECTOR) | (1 << BP_VECTOR));

	if (vcpu->arch.singlestep)
		svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);

	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug &
		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			svm->vmcb->control.intercept_exceptions |=
				1 << DB_VECTOR;
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			svm->vmcb->control.intercept_exceptions |=
				1 << BP_VECTOR;
	} else
		vcpu->guest_debug = 0;
}

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
	int old_debug = vcpu->guest_debug;
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->guest_debug = dbg->control;

	update_db_intercept(vcpu);

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
	else
		svm->vmcb->save.dr7 = vcpu->arch.dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
	else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
		svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	return 0;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}

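/*
 * Note: new_asid() hands out ASIDs 1..max_asid in order (ASID 0 is
 * reserved for the host). When the pool is exhausted, the per-CPU
 * generation is bumped and TLB_CONTROL_FLUSH_ALL_ASID requests a full
 * TLB flush on the next VMRUN before reuse starts again at 1.
 */
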
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long val;

	switch (dr) {
	case 0 ... 3:
		val = vcpu->arch.db[dr];
		break;
	case 6:
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			val = vcpu->arch.dr6;
		else
			val = svm->vmcb->save.dr6;
		break;
	case 7:
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			val = vcpu->arch.dr7;
		else
			val = svm->vmcb->save.dr7;
		break;
	default:
		val = 0;
	}

	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
	return val;
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler);

	*exception = 0;

	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = value;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->arch.cr4 & X86_CR4_DE)
			*exception = UD_VECTOR;
		return;
	case 6:
		if (value & 0xffffffff00000000ULL) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
		return;
	case 7:
		if (value & 0xffffffff00000000ULL) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
			svm->vmcb->save.dr7 = vcpu->arch.dr7;
			vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
		}
		return;
	default:
		/* FIXME: Possible case? */
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __func__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u64 fault_address;
	u32 error_code;

	fault_address = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;

	if (!npt_enabled)
		KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	else
		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	/*
	 * FIXME: This shouldn't be necessary here, but there is a flush
	 * missing in the MMU code. Until we find this bug, flush the
	 * complete TLB here on an NPF
	 */
	if (npt_enabled)
		svm_flush_tlb(&svm->vcpu);
	else {
		if (kvm_event_needs_reinjection(&svm->vcpu))
			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
	}
	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (!(svm->vcpu.guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
	    !svm->vcpu.arch.singlestep) {
		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
		return 1;
	}

	if (svm->vcpu.arch.singlestep) {
		svm->vcpu.arch.singlestep = false;
		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
			svm->vmcb->save.rflags &=
				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		update_db_intercept(&svm->vcpu);
	}

	if (svm->vcpu.guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		return 0;
	}

	return 1;
}

static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}

static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	int er;

	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

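/*
 * Note: nm_interception() below is the other half of the lazy FPU
 * switch set up in svm_set_cr0(): while the guest does not own the
 * FPU, #NM is intercepted and CR0.TS is forced on; on the first #NM
 * the intercept is dropped, TS is cleared (unless the guest itself set
 * it) and fpu_active is flagged.
 */
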
static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

	return 1;
}

static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, in, string;
	unsigned port;

	++svm->vcpu.stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	string = (io_info & SVM_IOIO_STR_MASK) != 0;

	if (string) {
		if (emulate_instruction(&svm->vcpu,
					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;
	}

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;

	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}

static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	KVMTRACE_0D(NMI, &svm->vcpu, handler);
	return 1;
}

static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	++svm->vcpu.stat.irq_exits;
	KVMTRACE_0D(INTR, &svm->vcpu, handler);
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}

static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
	    || !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code)
{
	if (is_nested(svm)) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = error_code;
		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
		if (nested_svm_exit_handled(svm, false)) {
			nsvm_printk("VMexit -> EXCP 0x%x\n", nr);

			nested_svm_vmexit(svm);
			return 1;
		}
	}

	return 0;
}

static inline int nested_svm_intr(struct vcpu_svm *svm)
{
	if (is_nested(svm)) {
		if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
			return 0;

		if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
			return 0;

		svm->vmcb->control.exit_code = SVM_EXIT_INTR;

		if (nested_svm_exit_handled(svm, false)) {
			nsvm_printk("VMexit -> INTR\n");
			nested_svm_vmexit(svm);
			return 1;
		}
	}

	return 0;
}

static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	if (is_error_page(page)) {
		printk(KERN_INFO "%s: could not find page at 0x%llx\n",
		       __func__, gpa);
		kvm_release_page_clean(page);
		kvm_inject_gp(&svm->vcpu, 0);
		return NULL;
	}
	return page;
}

static int nested_svm_do(struct vcpu_svm *svm,
			 u64 arg1_gpa, u64 arg2_gpa, void *opaque,
			 int (*handler)(struct vcpu_svm *svm,
					void *arg1,
					void *arg2,
					void *opaque))
{
	struct page *arg1_page;
	struct page *arg2_page = NULL;
	void *arg1;
	void *arg2 = NULL;
	int retval;

	arg1_page = nested_svm_get_page(svm, arg1_gpa);
	if (arg1_page == NULL)
		return 1;

	if (arg2_gpa) {
		arg2_page = nested_svm_get_page(svm, arg2_gpa);
		if (arg2_page == NULL) {
			kvm_release_page_clean(arg1_page);
			return 1;
		}
	}

	arg1 = kmap_atomic(arg1_page, KM_USER0);
	if (arg2_gpa)
		arg2 = kmap_atomic(arg2_page, KM_USER1);

	retval = handler(svm, arg1, arg2, opaque);

	kunmap_atomic(arg1, KM_USER0);
	if (arg2_gpa)
		kunmap_atomic(arg2, KM_USER1);

	kvm_release_page_dirty(arg1_page);
	if (arg2_gpa)
		kvm_release_page_dirty(arg2_page);

	return retval;
}

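/*
 * Note: nested_svm_do() maps up to two guest pages with the atomic
 * kmap slots KM_USER0/KM_USER1, so the handler it calls runs with
 * preemption disabled and must not sleep. Both pages are released
 * dirty since most handlers write at least one of them.
 */
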
static int nested_svm_exit_handled_real(struct vcpu_svm *svm,
					void *arg1,
					void *arg2,
					void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	bool kvm_overrides = *(bool *)opaque;
	u32 exit_code = svm->vmcb->control.exit_code;

	if (kvm_overrides) {
		switch (exit_code) {
		case SVM_EXIT_INTR:
		case SVM_EXIT_NMI:
			return 0;
		/* For now we are always handling NPFs when using them */
		case SVM_EXIT_NPF:
			if (npt_enabled)
				return 0;
			break;
		/* When we're shadowing, trap PFs */
		case SVM_EXIT_EXCP_BASE + PF_VECTOR:
			if (!npt_enabled)
				return 0;
			break;
		default:
			break;
		}
	}

	switch (exit_code) {
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
		u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
		if (nested_vmcb->control.intercept_cr_read & cr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
		if (nested_vmcb->control.intercept_cr_write & cr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
		u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
		if (nested_vmcb->control.intercept_dr_read & dr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
		if (nested_vmcb->control.intercept_dr_write & dr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
		if (nested_vmcb->control.intercept_exceptions & excp_bits)
			return 1;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		nsvm_printk("exit code: 0x%x\n", exit_code);
		if (nested_vmcb->control.intercept & exit_bits)
			return 1;
	}
	}

	return 0;
}

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
				       void *arg1, void *arg2,
				       void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	u8 *msrpm = (u8 *)arg2;
	u32 t0, t1;
	u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u32 param = svm->vmcb->control.exit_info_1 & 1;

	if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return 0;

	switch (msr) {
	case 0 ... 0x1fff:
		t0 = (msr * 2) % 8;
		t1 = msr / 8;
		break;
	case 0xc0000000 ... 0xc0001fff:
		t0 = (8192 + msr - 0xc0000000) * 2;
		t1 = (t0 / 8);
		t0 %= 8;
		break;
	case 0xc0010000 ... 0xc0011fff:
		t0 = (16384 + msr - 0xc0010000) * 2;
		t1 = (t0 / 8);
		t0 %= 8;
		break;
	default:
		return 1;
	}
	if (msrpm[t1] & ((1 << param) << t0))
		return 1;

	return 0;
}

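/*
 * Note: the lookup above walks the nested guest's MSR permission map
 * byte-wise: t1 is the byte index, t0 the bit offset of the two-bit
 * read/write pair, and bit 0 of exit_info_1 distinguishes a read (0)
 * from a write (1) access -- the same layout set_msr_interception()
 * writes on the host side.
 */
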
static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
{
	bool k = kvm_override;

	switch (svm->vmcb->control.exit_code) {
	case SVM_EXIT_MSR:
		return nested_svm_do(svm, svm->nested_vmcb,
				     svm->nested_vmcb_msrpm, NULL,
				     nested_svm_exit_handled_msr);
	default:
		break;
	}

	return nested_svm_do(svm, svm->nested_vmcb, 0, &k,
			     nested_svm_exit_handled_real);
}

static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
				  void *arg2, void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	struct vmcb *hsave = svm->hsave;
	u64 nested_save[] = { nested_vmcb->save.cr0,
			      nested_vmcb->save.cr3,
			      nested_vmcb->save.cr4,
			      nested_vmcb->save.efer,
			      nested_vmcb->control.intercept_cr_read,
			      nested_vmcb->control.intercept_cr_write,
			      nested_vmcb->control.intercept_dr_read,
			      nested_vmcb->control.intercept_dr_write,
			      nested_vmcb->control.intercept_exceptions,
			      nested_vmcb->control.intercept,
			      nested_vmcb->control.msrpm_base_pa,
			      nested_vmcb->control.iopm_base_pa,
			      nested_vmcb->control.tsc_offset };

	/* Give the current vmcb to the guest */
	memcpy(nested_vmcb, svm->vmcb, sizeof(struct vmcb));
	nested_vmcb->save.cr0 = nested_save[0];
	if (!npt_enabled)
		nested_vmcb->save.cr3 = nested_save[1];
	nested_vmcb->save.cr4 = nested_save[2];
	nested_vmcb->save.efer = nested_save[3];
	nested_vmcb->control.intercept_cr_read = nested_save[4];
	nested_vmcb->control.intercept_cr_write = nested_save[5];
	nested_vmcb->control.intercept_dr_read = nested_save[6];
	nested_vmcb->control.intercept_dr_write = nested_save[7];
	nested_vmcb->control.intercept_exceptions = nested_save[8];
	nested_vmcb->control.intercept = nested_save[9];
	nested_vmcb->control.msrpm_base_pa = nested_save[10];
	nested_vmcb->control.iopm_base_pa = nested_save[11];
	nested_vmcb->control.tsc_offset = nested_save[12];

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	if ((nested_vmcb->control.int_ctl & V_IRQ_MASK) &&
	    (nested_vmcb->control.int_vector)) {
		nsvm_printk("WARNING: IRQ 0x%x still enabled on #VMEXIT\n",
			    nested_vmcb->control.int_vector);
	}

	/* Restore the original control entries */
	svm->vmcb->control = hsave->control;

	/* Kill any pending exceptions */
	if (svm->vcpu.arch.exception.pending == true)
		nsvm_printk("WARNING: Pending Exception\n");
	svm->vcpu.arch.exception.pending = false;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	svm->vmcb->save.rflags = hsave->save.rflags;
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
	/* Exit nested SVM mode */
	svm->nested_vmcb = 0;

	return 0;
}

static int nested_svm_vmexit(struct vcpu_svm *svm)
{
	nsvm_printk("VMexit\n");
	if (nested_svm_do(svm, svm->nested_vmcb, 0,
			  NULL, nested_svm_vmexit_real))
		return 1;

	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	return 0;
}

static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1,
				  void *arg2, void *opaque)
{
	int i;
	u32 *nested_msrpm = (u32 *)arg1;

	for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
		svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm);

	return 0;
}

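/*
 * Note: the merge above ORs the host and nested MSR bitmaps so an MSR
 * access exits whenever either KVM or the nested hypervisor wants it
 * intercepted; msrpm_base_pa is then pointed at the merged copy while
 * the nested guest runs.
 */
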
static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
			    void *arg2, void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	struct vmcb *hsave = svm->hsave;

	/* nested_vmcb is our indicator if nested SVM is activated */
	svm->nested_vmcb = svm->vmcb->save.rax;

	/* Clear internal status */
	svm->vcpu.arch.exception.pending = false;

	/* Save the old vmcb, so we don't need to pick what we save, but
	   can restore everything when a VMEXIT occurs */
	memcpy(hsave, svm->vmcb, sizeof(struct vmcb));
	/* We need to remember the original CR3 in the SPT case */
	if (!npt_enabled)
		hsave->save.cr3 = svm->vcpu.arch.cr3;
	hsave->save.cr4 = svm->vcpu.arch.cr4;
	hsave->save.rip = svm->next_rip;

	if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	svm->vmcb->save.rflags = nested_vmcb->save.rflags;
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
	} else {
		kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
		kvm_mmu_reset_context(&svm->vcpu);
	}
	svm->vmcb->save.cr2 = nested_vmcb->save.cr2;
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;

	/* We don't want a nested guest to be more powerful than the guest,
	   so all intercepts are ORed */
	svm->vmcb->control.intercept_cr_read |=
		nested_vmcb->control.intercept_cr_read;
	svm->vmcb->control.intercept_cr_write |=
		nested_vmcb->control.intercept_cr_write;
	svm->vmcb->control.intercept_dr_read |=
		nested_vmcb->control.intercept_dr_read;
	svm->vmcb->control.intercept_dr_write |=
		nested_vmcb->control.intercept_dr_write;
	svm->vmcb->control.intercept_exceptions |=
		nested_vmcb->control.intercept_exceptions;

	svm->vmcb->control.intercept |= nested_vmcb->control.intercept;

	svm->nested_vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;

	force_new_asid(&svm->vcpu);
	svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info;
	svm->vmcb->control.exit_int_info_err =
		nested_vmcb->control.exit_int_info_err;
	svm->vmcb->control.int_ctl =
		nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	if (nested_vmcb->control.int_ctl & V_IRQ_MASK) {
		nsvm_printk("nSVM Injecting Interrupt: 0x%x\n",
			    nested_vmcb->control.int_ctl);
	}
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	nsvm_printk("nSVM exit_int_info: 0x%x | int_state: 0x%x\n",
		    nested_vmcb->control.exit_int_info,
		    nested_vmcb->control.int_state);

	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
	if (nested_vmcb->control.event_inj & SVM_EVTINJ_VALID)
		nsvm_printk("Injecting Event: 0x%x\n",
			    nested_vmcb->control.event_inj);
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	svm->vcpu.arch.hflags |= HF_GIF_MASK;

	return 0;
}

5542675b
AG
static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;

	return 1;
}

static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb,
			     void *arg2, void *opaque)
{
	return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb);
}

static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
			     void *arg2, void *opaque)
{
	return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb);
}

static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload);

	return 1;
}

static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave);

	return 1;
}

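/*
 * Emulated VMRUN: the guest's VMCB address is taken from rAX, its
 * state is merged into the hardware VMCB and the two MSR permission
 * maps are combined.  Every path returns 1 because the instruction
 * itself has been handled either way.
 */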
static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	nsvm_printk("VMrun\n");
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	if (nested_svm_do(svm, svm->vmcb->save.rax, 0,
			  NULL, nested_svm_vmrun))
		return 1;

	if (nested_svm_do(svm, svm->nested_vmcb_msrpm, 0,
			  NULL, nested_svm_vmrun_msrpm))
		return 1;

	return 1;
}

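/*
 * STGI and CLGI toggle the Global Interrupt Flag.  While GIF is clear
 * the guest receives no interrupts or NMIs, so the flag is mirrored
 * in vcpu->arch.hflags as HF_GIF_MASK and checked before injection.
 */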
static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	svm->vcpu.arch.hflags |= HF_GIF_MASK;

	return 1;
}

static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;

	/* After a CLGI no interrupts should come */
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;

	return 1;
}

static int invalid_op_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

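/*
 * Hardware task switch exit: exit_info_1 holds the new TSS selector,
 * exit_info_2 encodes the reason and exit_int_info describes any
 * event that triggered the switch.  An event delivered through a task
 * gate is consumed by the switch itself, so the latched NMI,
 * exception or interrupt is dropped rather than reinjected.
 */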
static int task_switch_interception(struct vcpu_svm *svm,
				    struct kvm_run *kvm_run)
{
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			svm->vcpu.arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			kvm_clear_exception_queue(&svm->vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
			kvm_clear_interrupt_queue(&svm->vcpu);
			break;
		default:
			break;
		}
	}

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
		skip_emulated_instruction(&svm->vcpu);

	return kvm_task_switch(&svm->vcpu, tss_selector, reason);
}

static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

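/*
 * The IRET intercept fires when the guest is about to return from an
 * NMI handler.  Drop the intercept and set HF_IRET_MASK so that the
 * NMI window is considered open again once the IRET has retired.
 */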
static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	++svm->vcpu.stat.nmi_window_exits;
	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
	svm->vcpu.arch.hflags |= HF_IRET_MASK;
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

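/*
 * A CR8 write only needs to reach userspace when it lowers the TPR,
 * since raising it cannot unmask a pending interrupt.  With the
 * in-kernel irqchip the intercept is dropped entirely and V_TPR
 * handles the TPR without further exits.
 */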
static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
	/* instruction emulation calls kvm_set_cr8() */
	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
	if (irqchip_in_kernel(svm->vcpu.kvm)) {
		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
		return 1;
	}
	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
		return 1;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

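/*
 * MSRs that the VMCB saves for us are read straight from it; anything
 * unknown falls through to kvm_get_msr_common().  The guest TSC is
 * the host TSC plus the VMCB's tsc_offset, matching what the guest
 * sees after VMRUN.
 */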
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC: {
		u64 tsc;

		rdtscll(tsc);
		*data = svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->sysenter_esp;
		break;
	/* Nobody will change the following 5 values in the VMCB so
	   we can safely return them on rdmsr. They will always be 0
	   until LBRV is implemented. */
	case MSR_IA32_DEBUGCTLMSR:
		*data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		*data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		*data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		*data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		*data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		*data = svm->hsave_msr;
		break;
	case MSR_VM_CR:
		*data = 0;
		break;
	case MSR_IA32_UCODE_REV:
		*data = 0x01000065;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data))
		kvm_inject_gp(&svm->vcpu, 0);
	else {
		KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
			    (u32)(data >> 32), handler);

		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

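/*
 * Writes mirror svm_get_msr(): VMCB-backed MSRs are stored directly.
 * A TSC write becomes a new tsc_offset relative to the current host
 * TSC, and DEBUGCTL is honoured only when the CPU supports LBR
 * virtualization.
 */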
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC: {
		u64 tsc;

		rdtscll(tsc);
		svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->sysenter_eip = data;
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->sysenter_esp = data;
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!svm_has(SVM_FEATURE_LBRV)) {
			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
					__func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_VM_HSAVE_PA:
		svm->hsave_msr = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
		    handler);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data))
		kvm_inject_gp(&svm->vcpu, 0);
	else
		skip_emulated_instruction(&svm->vcpu);
	return 1;
}

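/* On an MSR intercept, exit_info_1 is 1 for WRMSR and 0 for RDMSR. */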
static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm, kvm_run);
	else
		return rdmsr_interception(svm, kvm_run);
}

static int interrupt_window_interception(struct vcpu_svm *svm,
					 struct kvm_run *kvm_run)
{
	KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);

	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	/*
	 * If userspace is waiting to inject interrupts, exit as soon as
	 * possible
	 */
	if (!irqchip_in_kernel(svm->vcpu.kvm) &&
	    kvm_run->request_interrupt_window &&
	    !kvm_cpu_has_interrupt(&svm->vcpu)) {
		++svm->vcpu.stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

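/*
 * Dispatch table indexed by SVM exit code.  Exits without a cheaper
 * shortcut fall back to full instruction emulation; exits the guest
 * must never trigger (INVLPGA, SKINIT, MONITOR/MWAIT) raise #UD.
 */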
static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_IRET]				= iret_interception,
	[SVM_EXIT_INVD]				= emulate_on_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= invalid_op_interception,
	[SVM_EXIT_WBINVD]			= emulate_on_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
	[SVM_EXIT_NPF]				= pf_interception,
};

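/*
 * Top-level exit handler: nested exits are offered to the L1
 * hypervisor first, the CR0/CR3 shadow state is resynchronized when
 * NPT is active, and the exit is then dispatched through
 * svm_exit_handlers[].
 */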
static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_code = svm->vmcb->control.exit_code;

	KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
		    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);

	if (is_nested(svm)) {
		nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
			    exit_code, svm->vmcb->control.exit_info_1,
			    svm->vmcb->control.exit_info_2, svm->vmcb->save.rip);
		if (nested_svm_exit_handled(svm, true)) {
			nested_svm_vmexit(svm);
			nsvm_printk("-> #VMEXIT\n");
			return 1;
		}
	}

	if (npt_enabled) {
		int mmu_reload = 0;
		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
			mmu_reload = 1;
		}
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
		vcpu->arch.cr3 = svm->vmcb->save.cr3;
		if (mmu_reload) {
			kvm_mmu_reset_context(vcpu);
			kvm_mmu_load(vcpu);
		}
	}

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm, kvm_run);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != svm_data->asid_generation)
		new_asid(svm, svm_data);
}

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	vcpu->arch.hflags |= HF_NMI_MASK;
	svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);

	++svm->vcpu.stat.irq_injections;
	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void svm_queue_irq(struct kvm_vcpu *vcpu, unsigned nr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	nested_svm_intr(svm);

	svm_queue_irq(vcpu, vcpu->arch.interrupt.nr);
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (irr == -1)
		return;

	if (tpr >= irr)
		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
}

static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

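/*
 * An interrupt is deliverable only when EFLAGS.IF is set, no
 * interrupt shadow is in effect and GIF is set.
 */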
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	return (vmcb->save.rflags & X86_EFLAGS_IF) &&
		!(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

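/*
 * SVM has no dedicated interrupt-window exit.  Instead we program a
 * dummy virtual interrupt (only V_IRQ matters, the vector does not);
 * the CPU takes a VINTR exit as soon as the guest can accept
 * interrupts, and interrupt_window_interception() takes over from
 * there.
 */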
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	svm_set_vintr(to_svm(vcpu));
	svm_inject_irq(to_svm(vcpu), 0x0);
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return; /* IRET will cause a vm exit */

	/* Something prevents the NMI from being injected.  Single step
	   over the possible problem (IRET or exception injection or
	   interrupt shadow) */
	vcpu->arch.singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
	update_db_intercept(vcpu);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

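/*
 * Requeue whatever event was in flight when the exit happened so it
 * is delivered on the next entry.  Software exceptions are not
 * reinjected; the instruction is re-executed instead.
 */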
static void svm_complete_interrupts(struct vcpu_svm *svm)
{
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;

	if (svm->vcpu.arch.hflags & HF_IRET_MASK)
		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);

	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		svm->vcpu.arch.nmi_injected = true;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/* In case of a software exception, do not reinject the
		   vector, but re-execute the instruction instead */
		if (kvm_exception_is_soft(vector))
			break;
		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_queue_exception_e(&svm->vcpu, vector, err);

		} else
			kvm_queue_exception(&svm->vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(&svm->vcpu, vector, false);
		break;
	default:
		break;
	}
}

#ifdef CONFIG_X86_64
#define R "r"
#else
#define R "e"
#endif

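/*
 * The world-switch path: only rAX, rSP and rIP live in the VMCB, so
 * the remaining general purpose registers are moved by hand in the
 * asm block, with R expanding to the natural register-name prefix for
 * the build.  VMLOAD/VMRUN/VMSAVE run with GIF cleared via clgi() but
 * with host IRQs enabled around the guest execution window.
 */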
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	pre_svm_run(svm);

	sync_lapic_to_cr8(vcpu);

	save_host_msrs(vcpu);
	fs_selector = kvm_read_fs();
	gs_selector = kvm_read_gs();
	ldt_selector = kvm_read_ldt();
	if (!is_nested(svm))
		svm->vmcb->save.cr2 = vcpu->arch.cr2;
	/* required for live migration with NPT */
	if (npt_enabled)
		svm->vmcb->save.cr3 = vcpu->arch.cr3;

	clgi();

	local_irq_enable();

	asm volatile (
		"push %%"R"bp; \n\t"
		"mov %c[rbx](%[svm]), %%"R"bx \n\t"
		"mov %c[rcx](%[svm]), %%"R"cx \n\t"
		"mov %c[rdx](%[svm]), %%"R"dx \n\t"
		"mov %c[rsi](%[svm]), %%"R"si \n\t"
		"mov %c[rdi](%[svm]), %%"R"di \n\t"
		"mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%[svm]), %%r8 \n\t"
		"mov %c[r9](%[svm]), %%r9 \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#endif

		/* Enter guest mode */
		"push %%"R"ax \n\t"
		"mov %c[vmcb](%[svm]), %%"R"ax \n\t"
		__ex(SVM_VMLOAD) "\n\t"
		__ex(SVM_VMRUN) "\n\t"
		__ex(SVM_VMSAVE) "\n\t"
		"pop %%"R"ax \n\t"

		/* Save guest registers, load host registers */
		"mov %%"R"bx, %c[rbx](%[svm]) \n\t"
		"mov %%"R"cx, %c[rcx](%[svm]) \n\t"
		"mov %%"R"dx, %c[rdx](%[svm]) \n\t"
		"mov %%"R"si, %c[rsi](%[svm]) \n\t"
		"mov %%"R"di, %c[rdi](%[svm]) \n\t"
		"mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8, %c[r8](%[svm]) \n\t"
		"mov %%r9, %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		"pop %%"R"bp"
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
		, R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#endif
		);

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	kvm_load_fs(fs_selector);
	kvm_load_gs(gs_selector);
	kvm_load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	local_irq_disable();

	stgi();

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;

	if (npt_enabled) {
		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
	}

	svm_complete_interrupts(svm);
}

#undef R

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (npt_enabled) {
		svm->vmcb->control.nested_cr3 = root;
		force_new_asid(vcpu);
		return;
	}

	svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	return 0;
}

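/*
 * Wire the SVM implementations into the architecture-independent KVM
 * core via the kvm_x86_ops vtable.
 */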
static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask = svm_get_mt_mask,
};

static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)