// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */
#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define IOPM_SIZE  (PAGE_SIZE * 3)
#define MSRPM_SIZE (PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS  20
#define MSRPM_OFFSETS           16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
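/*
 * Background (per the AMD APM vol. 2): the I/O permission map is 12K,
 * one bit per port for 64K ports plus overrun space for multi-byte
 * accesses, and the MSR permission map is 8K, built from 2K blocks of
 * two bits (read/write) per MSR covering the 0x0, 0xC0000000 and
 * 0xC0010000 MSR ranges. __sme_page_pa() also sets the SME C-bit so the
 * physical address handed to hardware stays valid with memory
 * encryption enabled. Illustrative use, with iopm_page as a placeholder:
 *
 *      vmcb->control.iopm_base_pa = __sme_page_pa(iopm_page);
 */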
enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
                            pause filter count */
        VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
        VMCB_ASID,       /* ASID */
        VMCB_INTR,       /* int_ctl, int_vector */
        VMCB_NPT,        /* npt_en, nCR3, gPAT */
        VMCB_CR,         /* CR0, CR3, CR4, EFER */
        VMCB_DR,         /* DR6, DR7 */
        VMCB_DT,         /* GDT, IDT */
        VMCB_SEG,        /* CS, DS, SS, ES, CPL */
        VMCB_CR2,        /* CR2 only */
        VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
        VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
                          * AVIC PHYSICAL_TABLE pointer,
                          * AVIC LOGICAL_TABLE pointer
                          */
        VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK  ((1U << VMCB_INTR) | (1U << VMCB_CR2))
struct kvm_sev_info {
        bool active;            /* SEV enabled guest */
        bool es_active;         /* SEV-ES enabled guest */
        unsigned int asid;      /* ASID used for this guest */
        unsigned int handle;    /* SEV firmware handle */
        int fd;                 /* SEV device fd */
        unsigned long pages_locked; /* Number of pages locked */
        struct list_head regions_list;  /* List of registered regions */
        u64 ap_jump_table;      /* SEV-ES AP Jump Table address */
        struct kvm *enc_context_owner; /* Owner of copied encryption context */
        struct misc_cg *misc_cg; /* For misc cgroup accounting */
};
struct kvm_svm {
        struct kvm kvm;

        /* Struct members for AVIC */
        u32 avic_vm_id;
        struct page *avic_logical_id_table_page;
        struct page *avic_physical_id_table_page;
        struct hlist_node hnode;

        struct kvm_sev_info sev_info;
};
struct kvm_vmcb_info {
        struct vmcb *ptr;
        unsigned long pa;
        int cpu;
        uint64_t asid_generation;
};
struct svm_nested_state {
        struct kvm_vmcb_info vmcb02;
        u64 hsave_msr;
        u64 vm_cr_msr;
        u64 vmcb12_gpa;
        u64 last_vmcb12_gpa;

        /* These are the merged vectors */
        u32 *msrpm;

        /* A VMRUN has started but has not yet been performed, so
         * we cannot inject a nested vmexit yet.  */
        bool nested_run_pending;

        /* cache for control fields of the guest */
        struct vmcb_control_area ctl;

        bool initialized;
};
struct vcpu_svm {
        struct kvm_vcpu vcpu;
        /* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
        struct vmcb *vmcb;
        struct kvm_vmcb_info vmcb01;
        struct kvm_vmcb_info *current_vmcb;
        struct svm_cpu_data *svm_data;

        /*
         * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
         * translated into the appropriate L2_CFG bits on the host to
         * perform speculative control.
         */
        u64 virt_spec_ctrl;

        struct svm_nested_state nested;

        bool nmi_singlestep;
        u64 nmi_singlestep_guest_rflags;

        unsigned int3_injected;
        unsigned long int3_rip;

        /* cached guest CPUID flags for faster access */
        bool nrips_enabled      : 1;

        struct page *avic_backing_page;
        u64 *avic_physical_id_cache;
        bool avic_is_running;

        /*
         * Per-vcpu list of struct amd_svm_iommu_ir:
         * This is used mainly to store interrupt remapping information used
         * when updating the vCPU affinity. This avoids the need to scan for
         * the IRTE and try to match the ga_tag in the IOMMU driver.
         */
        struct list_head ir_list;
        spinlock_t ir_list_lock;

        /* Save desired MSR intercept (read: pass-through) state */
        struct {
                DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
                DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
        } shadow_msr_intercept;

        /* SEV-ES support */
        struct vmcb_save_area *vmsa;
        struct ghcb *ghcb;
        struct kvm_host_map ghcb_map;
        bool received_first_sipi;

        /* SEV-ES scratch area support */
        void *ghcb_sa;
        u64 ghcb_sa_len;
        bool ghcb_sa_sync;
        bool ghcb_sa_free;

        bool guest_state_loaded;
};
struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        u32 min_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
        struct vmcb *current_vmcb;

        /* index = sev_asid, value = vmcb pointer */
        struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);
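/*
 * Illustrative access (sketch): per-CPU SVM state is reached through the
 * regular per-CPU accessors, e.g.:
 *
 *      struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 */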
void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_svm, kvm);
}
static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->active;
#else
        return false;
#endif
}
static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev_guest(kvm) && sev->es_active;
#else
        return false;
#endif
}
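/*
 * Illustrative use (sketch): SEV-ES guests need special casing wherever
 * KVM would touch register state directly, since that state lives in the
 * encrypted VMSA, e.g.:
 *
 *      if (sev_es_guest(vcpu->kvm))
 *              return;
 */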
static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
        vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
        vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
                               & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
        vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
        return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
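/*
 * Illustrative use of the clean bits (sketch): after software changes a
 * VMCB field, drop the matching clean bit so the CPU reloads that state
 * on the next VMRUN, e.g.:
 *
 *      vmcb->control.int_ctl |= V_IRQ_MASK;
 *      vmcb_mark_dirty(vmcb, VMCB_INTR);
 */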
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        __set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        __clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        return test_bit(bit, (unsigned long *)&control->intercepts);
}
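/*
 * Illustrative note (sketch): intercept numbers index one flat bitmap
 * spanning all intercept words, so exception intercepts are expressed as
 * an offset into the exception word, e.g.:
 *
 *      vmcb_set_intercept(&vmcb->control,
 *                         INTERCEPT_EXCEPTION_OFFSET + DB_VECTOR);
 */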
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        if (!sev_es_guest(svm->vcpu.kvm)) {
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
        }

        vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
        vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

        recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb->control.intercepts[INTERCEPT_DR] = 0;

        /* DR7 access must remain intercepted for an SEV-ES guest */
        if (sev_es_guest(svm->vcpu.kvm)) {
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
        }

        recalc_intercepts(svm);
}
static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        WARN_ON_ONCE(bit >= 32);
        vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

        recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        WARN_ON_ONCE(bit >= 32);
        vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

        recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb_set_intercept(&vmcb->control, bit);

        recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb_clr_intercept(&vmcb->control, bit);

        recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
        return vmcb_is_intercept(&svm->vmcb->control, bit);
}
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
        return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl |= V_GIF_MASK;
        else
                svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
        else
                return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
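/*
 * Sketch of the split above: with hardware vGIF the guest's GIF lives in
 * int_ctl and STGI/CLGI toggle it without exiting; without vGIF, KVM
 * tracks GIF itself in vcpu->arch.hflags, e.g.:
 *
 *      if (!gif_set(svm))
 *              return true;    (interrupt injection must wait for STGI)
 */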
#define MSR_INVALID     0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
                          int read, int write);
#define NESTED_EXIT_HOST        0       /* Exit handled on host level */
#define NESTED_EXIT_DONE        1       /* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE    2       /* Further checks needed      */
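/*
 * Illustrative flow (sketch): the nested exit-check helpers return one
 * of the codes above, e.g.:
 *
 *      if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
 *              return 1;       (exit was reflected to L1)
 */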
static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
        return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
        return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
        return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
        svm->vmcb->control.exit_code   = exit_code;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;
        return nested_svm_vmexit(svm);
}
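/*
 * Illustrative use (sketch): synthesize an unconditional exit to L1, for
 * example when an interrupt window opens while L2 is running:
 *
 *      nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */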
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                               bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK    (0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                 31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK                (1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK    (0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK        (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK          (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK               (1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK         0xFFFFFFFFFF000ULL

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
        svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
        vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}
static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 *entry = svm->avic_physical_id_cache;

        if (!entry)
                return false;

        return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}
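/*
 * Illustrative layout (sketch): a physical APIC ID table entry packs the
 * backing-page address with status bits, e.g. a valid, running entry:
 *
 *      entry = (page_to_phys(page) & AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
 *              AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK |
 *              AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
 */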
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

#define GHCB_VERSION_MAX        1ULL
#define GHCB_VERSION_MIN        1ULL

extern unsigned int max_sev_asid;
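/*
 * Note (sketch): SEV ASIDs run from 1 to max_sev_asid as enumerated by
 * CPUID 0x8000001F; SEV-ES guests are restricted to the low end of that
 * range, below the minimum ASID usable for plain SEV.
 */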
void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
                            struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
                              struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif /* __SVM_SVM_H */