// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/sev-common.h>

#include "kvm_cache_regs.h"
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	46
#define MSRPM_OFFSETS	32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int vgif;
extern bool intercept_smi;

extern enum avic_modes avic_mode;
/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,	 /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,	 /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
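/*
 * Illustrative sketch (not lifted from svm.c; new_cr0 is a stand-in): a
 * caller that modifies control-register state in the VMCB clears the
 * matching clean bit so the CPU reloads that state on the next VMRUN.
 * VMCB_INTR and VMCB_CR2 need no such marking because they are rewritten
 * before every VMRUN anyway:
 *
 *	vmcb->save.cr0 = new_cr0;
 *	vmcb_mark_dirty(vmcb, VMCB_CR);
 */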
struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};
struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u32 exit_int_info_err;
	struct hv_vmcb_enlightenments hv_enlightenments;
};
struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.  */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	/*
	 * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
	 * changes in the MSR bitmap for L1 or switching to a different L2.
	 * Note, this flag can only be used reliably in conjunction with a
	 * paravirt L1 which informs L0 whether any changes to the MSR bitmap
	 * for L2 were done on behalf of L1 (see the illustrative sketch after
	 * this struct).
	 */
	bool force_msr_bitmap_recalc;
};
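/*
 * Illustrative sketch (an assumption, not lifted verbatim from svm.c;
 * svm->msrpm stands in for L1's MSR permission bitmap): when L0 adjusts the
 * L1 MSR bitmap, the merged L2 bitmap becomes stale, so the flag is set and
 * the next nested VMRUN rebuilds the merged bitmap:
 *
 *	set_msr_interception(vcpu, svm->msrpm, msr, read, write);
 *	svm->nested.force_msr_bitmap_recalc = true;
 */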
struct vcpu_sev_es_state {
	struct sev_es_save_area *vmsa;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};
struct vcpu_svm {
	struct kvm_vcpu vcpu;

	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;

	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	struct svm_nested_state nested;
	u64 nmi_singlestep_guest_rflags;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;
	/* optional nested SVM features that are enabled for this guest */
	bool nrips_enabled                : 1;
	bool tsc_scaling_enabled          : 1;
	bool v_vmload_vmsave_enabled      : 1;
	bool lbrv_enabled                 : 1;
	bool pause_filter_enabled         : 1;
	bool pause_threshold_enabled      : 1;
	bool vgif_enabled                 : 1;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;
	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;
};
struct svm_cpu_data {
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	unsigned long save_area_pa;

	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);
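/*
 * Illustrative sketch (hypothetical caller; cpu and asid are stand-ins):
 * the per-CPU data is reached through the usual per-CPU accessors, and
 * sev_vmcbs is indexed by SEV ASID, e.g. when dropping a guest's cached
 * VMCB association on a given CPU:
 *
 *	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
 *
 *	sd->sev_vmcbs[asid] = NULL;
 */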
void recalc_intercepts(struct vcpu_svm *svm);
static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}
static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}
static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}
static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			      & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}
/*
 * Only the PDPTRs are loaded on demand into the shadow MMU. All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed. svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)
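/*
 * Illustrative sketch (hypothetical caller; new_cr3 is a stand-in) of the
 * flow described above: a CR3 change updates the cached value and requests
 * a PGD load instead of touching the VMCB directly, and svm_load_mmu_pgd()
 * later writes the new value into the VMCB:
 *
 *	vcpu->arch.cr3 = new_cr3;
 *	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
 */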
static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}
static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}
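/*
 * Illustrative sketch (hypothetical usage, not lifted from svm.c): the svm_*
 * wrappers update vmcb01 and then re-merge the nested intercepts via
 * recalc_intercepts(), so a caller that wants to trap or stop trapping HLT
 * simply does:
 *
 *	svm_set_intercept(svm, INTERCEPT_HLT);
 *	...
 *	svm_clr_intercept(svm, INTERCEPT_HLT);
 */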
static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}
static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;

	return svm->vmcb01.ptr;
}
static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);

	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}
static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 msrs per u8, and 4 u8 in u32 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}
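/*
 * Worked example (illustrative): with APIC_BASE_MSR at 0x800, permission-map
 * offset 0x80 covers MSRs 0x80 * 16 = 0x800 through 0x80f, so offsets
 * 0x80..0x8f all land in the x2APIC range 0x800..0x8ff and the helper
 * returns true for them; offset 0x90 maps to MSR 0x900 and returns false.
 */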
#define MSR_INVALID			0xffffffffU

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

extern bool dump_invalid_vmcb;
u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);
#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
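/*
 * Illustrative sketch (not the in-tree handler): a nested intercept check
 * typically consults the cached vmcb12 controls and maps "L1 wants this
 * exit" to NESTED_EXIT_DONE, "handle it in L0" to NESTED_EXIT_HOST, and
 * "look more closely" to NESTED_EXIT_CONTINUE:
 *
 *	if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR))
 *		return NESTED_EXIT_DONE;
 *
 *	return NESTED_EXIT_HOST;
 */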
static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}
static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}
int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void __svm_write_tsc_multiplier(u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;
bool avic_hardware_setup(struct kvm_x86_ops *ops);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);

void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);