// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
#define IOPM_SIZE (PAGE_SIZE * 3)
#define MSRPM_SIZE (PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	20
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
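
/*
 * Each MSR is assigned two intercept bits (one read, one write) in the
 * MSR permission map.  msrpm_offsets[] records the u32 word offsets
 * within the map that KVM actually touches, so code that merges a
 * nested guest's bitmap only visits at most MSRPM_OFFSETS words
 * instead of scanning the whole MSRPM_SIZE map.
 */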
extern bool npt_enabled;
extern bool intercept_smi;
/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};
#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))
/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
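
/*
 * Example (illustrative): after software modifies a VMCB area it must
 * clear the matching clean bit so the CPU reloads that state on the
 * next VMRUN:
 *
 *	vmcb->save.cr4 = cr4;
 *	vmcb_mark_dirty(vmcb, VMCB_CR);
 *
 * A clean bit left set tells hardware it may use its cached copy of
 * that state, so a missing vmcb_mark_dirty() call results in stale
 * guest state.
 */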
struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
};
struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};
struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};
struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet. */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;
};
struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;

	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;
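
	/*
	 * shadow_msr_intercept remembers the pass-through state that KVM
	 * wants for each directly-accessed MSR, so the real permission
	 * bitmap can be rebuilt, e.g. when the userspace MSR filter
	 * changes or a different msrpm becomes active.
	 */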

	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u64 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	bool guest_state_loaded;
};
struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);
static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}
static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}
static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}
static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}
static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}
static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
{
	return (vmcb->control.clean & (1 << bit));
}
static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}
static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}
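
/*
 * Example (illustrative): the container_of() wrappers above rely on
 * 'struct kvm' and 'struct kvm_vcpu' being embedded in their
 * SVM-specific containers, so a generic pointer converts back with
 * plain pointer arithmetic and no extra lookup:
 *
 *	struct vcpu_svm *svm = to_svm(vcpu);
 *	vmcb_mark_all_dirty(svm->vmcb);
 */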
static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}
static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}
static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
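
/*
 * The three helpers above manipulate a raw vmcb_control_area and do
 * not refresh the nested intercept merge.  The svm_set_intercept() and
 * svm_clr_intercept() wrappers below operate on vmcb01 and call
 * recalc_intercepts() so the change propagates to the active VMCB.
 */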
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}
static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}
static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}
static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}
static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}
static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}
static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}
static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}
static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}
static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
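
/*
 * Example (illustrative): with hardware vGIF the guest's Global
 * Interrupt Flag is tracked in int_ctl and kept current by the CPU;
 * without vGIF, KVM emulates GIF via vcpu->arch.hflags.  Callers use
 * gif_set() either way:
 *
 *	if (!gif_set(svm))
 *		return true;	// injection must wait until STGI
 */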
/* svm.c */
#define MSR_INVALID				0xffffffffU

extern bool dump_invalid_vmcb;
u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
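
/*
 * The nested exit-handling helpers use these values to say who
 * consumes an intercepted event: NESTED_EXIT_DONE means the exit was
 * reflected to L1 as a nested #VMEXIT, NESTED_EXIT_HOST keeps the
 * handling in KVM itself, and NESTED_EXIT_CONTINUE defers the
 * decision to further checks.
 */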
static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}
static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}
static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}
static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}
int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
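
/*
 * Usage sketch (illustrative): event-injection paths can synthesize a
 * nested exit that carries no extra exit information, e.g.:
 *
 *	if (nested_exit_on_intr(svm))
 *		return nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */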
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
				     struct vmcb_control_area *control);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;
/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1U << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL
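
/*
 * Layout of an AVIC physical ID table entry, as implied by the masks
 * above:
 *
 *	bits  7:0	host physical APIC ID
 *	bits 51:12	backing page physical address
 *	bit  62		vCPU is running
 *	bit  63		entry valid
 */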
static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL
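
/*
 * GHCB_VERSION_MIN/MAX bound the GHCB protocol versions KVM accepts
 * from an SEV-ES guest; with both set to 1, only protocol version 1
 * is supported.
 */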

extern unsigned int max_sev_asid;
void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);
/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif /* __SVM_SVM_H */