// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "kvm_cache_regs.h"

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2
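/*
 * Per the APM, the IOPM spans 12K (one intercept bit per I/O port) and the
 * MSRPM spans 8K, with two intercept bits (read and write) per MSR across
 * the three architecturally defined MSR ranges.
 */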

#define MAX_DIRECT_ACCESS_MSRS	46
#define MSRPM_OFFSETS	32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,	 /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,	 /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
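
/*
 * A set clean bit tells hardware that the corresponding VMCB area is
 * unchanged since it was last loaded and may be used from the VMCB cache.
 * Any code that writes a guest-visible VMCB field must therefore clear the
 * matching bit, e.g. (illustrative only):
 *
 *	vmcb->control.asid = asid;
 *	vmcb_mark_dirty(vmcb, VMCB_ASID);
 */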

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	union {
		struct hv_vmcb_enlightenments hv_enlightenments;
		u8 reserved_sw[32];
	};
};

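/*
 * Nested SVM naming: vmcb01 is the VMCB KVM uses to run L1, vmcb02 is the
 * VMCB KVM builds to run L2, and vmcb12 is the guest-owned VMCB in L1 memory
 * that describes L2 (KVM only works on cached copies of its control and save
 * areas, see the *_cached structures above).
 */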
struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.  */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;
};

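/*
 * For SEV-ES guests most register state lives encrypted in the VMSA; the
 * guest and KVM communicate through the GHCB, a guest-designated shared
 * (unencrypted) page that KVM maps on demand via ghcb_map.  The ghcb_sa_*
 * fields track the scratch buffer used when emulating MMIO and string I/O on
 * the guest's behalf.
 */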
struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;
	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	/* optional nested SVM features that are enabled for this guest */
	bool nrips_enabled               : 1;
	bool tsc_scaling_enabled         : 1;
	bool v_vmload_vmsave_enabled     : 1;
	bool lbrv_enabled                : 1;
	bool pause_filter_enabled        : 1;
	bool pause_threshold_enabled     : 1;
	bool vgif_enabled                : 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;
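	/*
	 * Note: the bitmaps above cache the intercept state KVM itself wants
	 * for each direct-access MSR (indexed by direct_access_msrs[] slot,
	 * not by MSR number) so that it can be reapplied when the userspace
	 * MSR filter changes; see svm_msr_filter_changed() in svm.c.
	 */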

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;
};

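/*
 * Per-physical-CPU SVM state.  save_area is the host state-save area whose
 * physical address is written to MSR_VM_HSAVE_PA when SVM is enabled on the
 * CPU, and sev_vmcbs remembers which VMCB last ran with each SEV ASID so the
 * ASID can be flushed when a different VMCB reuses it.
 */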
struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	unsigned long save_area_pa;

	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

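/*
 * Same check as vmcb_is_intercept(), but against the cached copy of L1's
 * vmcb12 control area rather than a real VMCB.
 */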
static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

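/*
 * Returns the VMCB whose int_ctl holds the virtual GIF, or NULL when hardware
 * vGIF is unavailable (GIF is then tracked in vcpu->arch.hflags).  While L2
 * runs and L1 does not itself use vGIF, KVM repurposes vmcb02's vGIF to track
 * L2's GIF; otherwise GIF lives in vmcb01.
 */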
static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

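/*
 * The MSRPM uses two intercept bits per MSR, i.e. 4 MSRs per byte and 16 MSRs
 * per u32 word, so an offset returned by svm_msrpm_offset() for the first MSR
 * range covers MSRs [offset * 16, offset * 16 + 15].  The x2APIC MSRs occupy
 * 0x800 - 0x8ff, hence the APIC_BASE_MSR + 0x100 bound below.
 */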
static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 msrs per u8, and 4 u8 in u32 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}

/* svm.c */
#define MSR_INVALID	0xffffffffU

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void __svm_write_tsc_multiplier(u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

bool avic_hardware_setup(struct kvm_x86_ops *ops);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);

void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

#endif