// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

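/*
 * Reflect a nested page fault into L1 as an NPF vmexit; installed below as
 * the ->inject_page_fault callback while L2 runs on shadowed nested page
 * tables.
 */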
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	/*
	 * The present bit is always zero for page structure faults on real
	 * hardware.
	 */
	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
		svm->vmcb->control.exit_info_1 &= ~1;

	nested_svm_vmexit(svm);
}

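/*
 * Read one of L1's PDPTEs from the nested page table base (nCR3) for the
 * legacy PAE case; a failed guest read is reported as a not-present PDPTE.
 */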
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

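/*
 * Point vcpu->arch.mmu at a shadow MMU that walks L1's nested page tables,
 * and use the nested MMU for L2's gva->gpa translations.
 */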
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu->shadow_root_level = vcpu->arch.tdp_level;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

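/*
 * Recompute the active VMCB's intercept bits as the union of L1's and L2's
 * intercepts, with adjustments that keep L0's interrupt virtualization
 * working while L2 runs.
 */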
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested.ctl;

	svm->nested.host_intercept_exceptions = h->intercept_exceptions;

	c->intercept_cr = h->intercept_cr;
	c->intercept_dr = h->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions;
	c->intercept = h->intercept;

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		c->intercept &= ~(1ULL << INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

	c->intercept_cr |= g->intercept_cr;
	c->intercept_dr |= g->intercept_dr;
	c->intercept_exceptions |= g->intercept_exceptions;
	c->intercept |= g->intercept;
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	dst->intercept_cr = from->intercept_cr;
	dst->intercept_dr = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept = from->intercept;
	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb.  */
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the MSR permission bitmaps of KVM and the
	 * nested VMCB.  It is optimized in that it only merges the parts
	 * where the KVM MSR permission bitmap may contain zero bits.
	 */
	int i;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

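/* Sanity-check the control area of the VMCB that L1 is asking to run. */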
static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{
	if ((control->intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (control->asid == 0)
		return false;

	if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

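/*
 * Check the save area of L1's VMCB; a failure here makes the emulated VMRUN
 * report SVM_EXIT_ERR to L1 instead of entering the nested guest.
 */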
static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
{
	bool nested_vmcb_lma;
	if ((vmcb->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
	    (vmcb->save.cr0 & X86_CR0_NW))
		return false;

	if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7))
		return false;

	nested_vmcb_lma =
		(vmcb->save.efer & EFER_LME) &&
		(vmcb->save.cr0 & X86_CR0_PG);

	if (!nested_vmcb_lma) {
		if (vmcb->save.cr4 & X86_CR4_PAE) {
			if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
				return false;
		} else {
			if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
				return false;
		}
	} else {
		if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
		    !(vmcb->save.cr0 & X86_CR0_PE) ||
		    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
			return false;
	}
	if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
		return false;

	return nested_vmcb_check_controls(&vmcb->control);
}

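/*
 * Cache the control area of the VMCB that L1 wants to run in
 * svm->nested.ctl, masking the low bits out of the MSR/IO permission
 * bitmap addresses.
 */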
static void load_nested_vmcb_control(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_svm_check_controls will check it.  */
	svm->nested.ctl.asid = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the nested_vmcb.
 */
void sync_nested_vmcb_control(struct vcpu_svm *svm)
{
	u32 mask;
	svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
					   struct vmcb *nested_vmcb)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			nested_vmcb->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	nested_vmcb->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

/*
 * Load guest's cr3 at nested entry.  @nested_npt is true if we are
 * emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	return kvm_set_cr3(vcpu, cr3);
}

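/* Copy L2 guest state from the nested VMCB into the currently active VMCB. */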
static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
{
	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	(void)nested_svm_load_cr3(&svm->vcpu, nested_vmcb->save.cr3,
				  nested_npt_enabled(svm));

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
	kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vcpu.arch.dr6  = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
}

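/*
 * Set up the control fields of the active VMCB for running L2: nested MMU,
 * TSC offset, int_ctl and the merged intercepts.
 */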
static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
{
	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;

	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(&svm->vcpu);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm_flush_tlb(&svm->vcpu);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl =
		(svm->nested.ctl.int_ctl & ~mask) |
		(svm->nested.hsave->control.int_ctl & mask);

	svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
	 */
	recalc_intercepts(svm);

	vmcb_mark_all_dirty(svm->vmcb);
}

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			 struct vmcb *nested_vmcb)
{
	svm->nested.vmcb = vmcb_gpa;
	load_nested_vmcb_control(svm, &nested_vmcb->control);
	nested_prepare_vmcb_save(svm, nested_vmcb);
	nested_prepare_vmcb_control(svm);

	svm_set_gif(svm, true);

	return 0;
}

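/*
 * Emulate VMRUN: map the VMCB addressed by RAX, validate it, stash the
 * current L1 state in hsave and switch to guest (L2) mode.
 */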
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_vmcb = map.hva;

	if (!nested_vmcb_checks(svm, nested_vmcb)) {
		nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1  = 0;
		nested_vmcb->control.exit_info_2  = 0;
		goto out;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es     = vmcb->save.es;
	hsave->save.cs     = vmcb->save.cs;
	hsave->save.ss     = vmcb->save.ss;
	hsave->save.ds     = vmcb->save.ds;
	hsave->save.gdtr   = vmcb->save.gdtr;
	hsave->save.idtr   = vmcb->save.idtr;
	hsave->save.efer   = svm->vcpu.arch.efer;
	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4    = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp    = vmcb->save.rsp;
	hsave->save.rax    = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3    = vmcb->save.cr3;
	else
		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(&hsave->control, &vmcb->control);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

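/* Copy the state touched by VMLOAD/VMSAVE from one VMCB to the other. */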
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

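/*
 * Emulate #VMEXIT: copy the L2 state and exit information back into the
 * nested VMCB, then restore the L1 state saved in hsave and leave guest mode.
 */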
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */
	svm_set_gif(svm, false);

	nested_vmcb->save.es     = vmcb->save.es;
	nested_vmcb->save.cs     = vmcb->save.cs;
	nested_vmcb->save.ss     = vmcb->save.ss;
	nested_vmcb->save.ds     = vmcb->save.ds;
	nested_vmcb->save.gdtr   = vmcb->save.gdtr;
	nested_vmcb->save.idtr   = vmcb->save.idtr;
	nested_vmcb->save.efer   = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2    = vmcb->save.cr2;
	nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip    = kvm_rip_read(&svm->vcpu);
	nested_vmcb->save.rsp    = kvm_rsp_read(&svm->vcpu);
	nested_vmcb->save.rax    = kvm_rax_read(&svm->vcpu);
	nested_vmcb->save.dr7    = vmcb->save.dr7;
	nested_vmcb->save.dr6    = svm->vcpu.arch.dr6;
	nested_vmcb->save.cpl    = vmcb->save.cpl;

	nested_vmcb->control.int_state    = vmcb->control.int_state;
	nested_vmcb->control.exit_code    = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1  = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2  = vmcb->control.exit_info_2;

	if (nested_vmcb->control.exit_code != SVM_EXIT_ERR)
		nested_vmcb_save_pending_event(svm, nested_vmcb);

	if (svm->nrips_enabled)
		nested_vmcb->control.next_rip = vmcb->control.next_rip;

	nested_vmcb->control.int_ctl       = svm->nested.ctl.int_ctl;
	nested_vmcb->control.tlb_ctl       = svm->nested.ctl.tlb_ctl;
	nested_vmcb->control.event_inj     = svm->nested.ctl.event_inj;
	nested_vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;

	nested_vmcb->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	nested_vmcb->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* Restore the original control entries */
	copy_vmcb_control_area(&vmcb->control, &hsave->control);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset;

	svm->nested.ctl.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	vmcb_mark_all_dirty(svm->vmcb);

	trace_kvm_nested_vmexit_inject(nested_vmcb->control.exit_code,
				       nested_vmcb->control.exit_info_1,
				       nested_vmcb->control.exit_info_2,
				       nested_vmcb->control.exit_int_info,
				       nested_vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);

	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}

	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu)) {
		struct vmcb *hsave = svm->nested.hsave;
		struct vmcb *vmcb = svm->vmcb;

		svm->nested.nested_run_pending = 0;
		leave_guest_mode(&svm->vcpu);
		copy_vmcb_control_area(&vmcb->control, &hsave->control);
		nested_svm_uninit_mmu_context(&svm->vcpu);
	}
}

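/*
 * Consult L1's MSR permission bitmap to decide whether an MSR intercept that
 * fired in L2 must be forwarded to L1 (NESTED_EXIT_DONE) or handled by L0.
 */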
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but we need it in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

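/* Likewise, consult L1's I/O permission bitmap for an IOIO intercept. */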
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.ctl.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.ctl.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.ctl.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

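/*
 * SVM instructions require EFER.SVME, paging and CPL 0; raise #UD or #GP
 * when those requirements are not met.
 */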
int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercept_exceptions & (1 << nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INIT));
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

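/*
 * Decide whether a pending INIT, exception, SMI, NMI or interrupt should
 * cause a vmexit from L2 to L1, and synthesize that vmexit if it should.
 * Returns -EBUSY while events have to stay blocked.
 */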
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

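/*
 * Exits that L0 must handle even if L1 also intercepts them; checked before
 * the regular nested_svm_exit_handled logic runs.
 */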
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

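/*
 * Export the nested state (header, L1 control area and saved L1 state) to
 * userspace for the KVM_GET_NESTED_STATE ioctl.
 */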
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out.  */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
			 sizeof(user_vmcb->control)))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;

out:
	return kvm_state.size;
}

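/*
 * Restore nested state from userspace (KVM_SET_NESTED_STATE), validating the
 * control and save areas before re-entering guest mode.
 */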
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area ctl;
	struct vmcb_save_area save;
	u32 cr0;

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0.  */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(svm);
		goto out_set_gif;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;
	if (copy_from_user(&ctl, &user_vmcb->control, sizeof(ctl)))
		return -EFAULT;
	if (copy_from_user(&save, &user_vmcb->save, sizeof(save)))
		return -EFAULT;

	if (!nested_vmcb_check_controls(&ctl))
		return -EINVAL;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_checks).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		return -EINVAL;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 * TODO: validate reserved bits for all saved state.
	 */
	if (!(save.cr0 & X86_CR0_PG))
		return -EINVAL;

	/*
	 * All checks done, we can enter guest mode.  L1 control fields
	 * come from the nested save state.  Guest state is already
	 * in the registers, the save area of the nested state instead
	 * contains saved L1 state.
	 */
	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
	hsave->save = save;

	svm->nested.vmcb = kvm_state->hdr.svm.vmcb_pa;
	load_nested_vmcb_control(svm, &ctl);
	nested_prepare_vmcb_control(svm);

out_set_gif:
	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
	return 0;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};