KVM: SVM: Add support for the SEV-ES VMSA
Author:     Tom Lendacky <thomas.lendacky@amd.com>
AuthorDate: Thu, 10 Dec 2020 17:09:40 +0000 (11:09 -0600)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Mon, 14 Dec 2020 16:09:32 +0000 (11:09 -0500)
Allocate a page during vCPU creation to be used as the encrypted VM save
area (VMSA) for the SEV-ES guest. Provide a flag in the kvm_vcpu_arch
structure that indicates whether the guest state is protected.
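
[Editor's note: as a hedged aside, not part of this patch, the intent of the new
flag is that KVM code touching saved guest register state checks it first, since
a protected guest keeps that state in its encrypted VMSA. A minimal sketch, with
kvm_can_access_guest_regs() as a hypothetical helper name:

static bool kvm_can_access_guest_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * Illustration only: once guest_state_protected is set, the saved
	 * register state lives in the encrypted VMSA and cannot be read or
	 * written by KVM, so accessors should bail out early.
	 */
	return !vcpu->arch.guest_state_protected;
}
]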

When freeing a VMSA page that has been encrypted, the cache contents must
be flushed using the MSR_AMD64_VM_PAGE_FLUSH MSR before the page is freed.
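
[Editor's note: for reference, a minimal sketch of that per-page flush, assuming
the MSR takes the page-aligned virtual address OR'd with the guest ASID, as the
sev.c hunk below does; vm_page_flush_one() is an illustrative name, the real
loop is in sev_flush_guest_memory():

static inline void vm_page_flush_one(unsigned long va, unsigned int asid)
{
	/* Flush one guest page: page-aligned VA in the upper bits, ASID in the low bits. */
	wrmsrl(MSR_AMD64_VM_PAGE_FLUSH, (va & PAGE_MASK) | asid);
}
]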

[ i386 build warnings ]
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <fde272b17eec804f3b9db18c131262fe074015c5.1607620209.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f002cdb13a0b15dd62370e88665dce22dfe8dc8f..8cf6b0493d49268a9b41ab0428ce1d997cf571e9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -805,6 +805,9 @@ struct kvm_vcpu_arch {
                 */
                bool enforce;
        } pv_cpuid;
+
+       /* Protected Guests */
+       bool guest_state_protected;
 };
 
 struct kvm_lpage_info {
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a2b01cbd051139f0fca8149615206ba6909620cf..501adb43ece37a4e37517cfced40b1fa13ba0282 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -14,6 +14,7 @@
 #include <linux/psp-sev.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
+#include <linux/processor.h>
 
 #include "x86.h"
 #include "svm.h"
@@ -1190,6 +1191,72 @@ void sev_hardware_teardown(void)
        sev_flush_asids();
 }
 
+/*
+ * Pages used by hardware to hold guest encrypted state must be flushed before
+ * returning them to the system.
+ */
+static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
+                                  unsigned long len)
+{
+       /*
+        * If hardware enforced cache coherency for encrypted mappings of the
+        * same physical page is supported, nothing to do.
+        */
+       if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
+               return;
+
+       /*
+        * If the VM Page Flush MSR is supported, use it to flush the page
+        * (using the page virtual address and the guest ASID).
+        */
+       if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
+               struct kvm_sev_info *sev;
+               unsigned long va_start;
+               u64 start, stop;
+
+               /* Align start and stop to page boundaries. */
+               va_start = (unsigned long)va;
+               start = (u64)va_start & PAGE_MASK;
+               stop = PAGE_ALIGN((u64)va_start + len);
+
+               if (start < stop) {
+                       sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
+
+                       while (start < stop) {
+                               wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
+                                      start | sev->asid);
+
+                               start += PAGE_SIZE;
+                       }
+
+                       return;
+               }
+
+               WARN(1, "Address overflow, using WBINVD\n");
+       }
+
+       /*
+        * Hardware should always have one of the above features,
+        * but if not, use WBINVD and issue a warning.
+        */
+       WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
+       wbinvd_on_all_cpus();
+}
+
+void sev_free_vcpu(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_svm *svm;
+
+       if (!sev_es_guest(vcpu->kvm))
+               return;
+
+       svm = to_svm(vcpu);
+
+       if (vcpu->arch.guest_state_protected)
+               sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
+       __free_page(virt_to_page(svm->vmsa));
+}
+
 void pre_sev_run(struct vcpu_svm *svm, int cpu)
 {
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8cb9474b6a0363126ed7b2b4acfe4f38ad4574fa..801e0a641258f90cad04818ec40348970c0a28a5 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1288,6 +1288,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm;
        struct page *vmcb_page;
+       struct page *vmsa_page = NULL;
        int err;
 
        BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
@@ -1298,9 +1299,19 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
        if (!vmcb_page)
                goto out;
 
+       if (sev_es_guest(svm->vcpu.kvm)) {
+               /*
+                * SEV-ES guests require a separate VMSA page used to contain
+                * the encrypted register state of the guest.
+                */
+               vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+               if (!vmsa_page)
+                       goto error_free_vmcb_page;
+       }
+
        err = avic_init_vcpu(svm);
        if (err)
-               goto error_free_vmcb_page;
+               goto error_free_vmsa_page;
 
        /* We initialize this flag to true to make sure that the is_running
         * bit would be set the first time the vcpu is loaded.
@@ -1310,12 +1321,16 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 
        svm->msrpm = svm_vcpu_alloc_msrpm();
        if (!svm->msrpm)
-               goto error_free_vmcb_page;
+               goto error_free_vmsa_page;
 
        svm_vcpu_init_msrpm(vcpu, svm->msrpm);
 
        svm->vmcb = page_address(vmcb_page);
        svm->vmcb_pa = __sme_set(page_to_pfn(vmcb_page) << PAGE_SHIFT);
+
+       if (vmsa_page)
+               svm->vmsa = page_address(vmsa_page);
+
        svm->asid_generation = 0;
        init_vmcb(svm);
 
@@ -1324,6 +1339,9 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 
        return 0;
 
+error_free_vmsa_page:
+       if (vmsa_page)
+               __free_page(vmsa_page);
 error_free_vmcb_page:
        __free_page(vmcb_page);
 out:
@@ -1351,6 +1369,8 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 
        svm_free_nested(svm);
 
+       sev_free_vcpu(vcpu);
+
        __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
 }
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index ef0f0dfabc692bd1d41624e48d9bb0b1f6c29f07..f96a0a66ca35dc634ca23df9e4b0c414bdc3466b 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -168,6 +168,10 @@ struct vcpu_svm {
                DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
                DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
        } shadow_msr_intercept;
+
+       /* SEV-ES support */
+       struct vmcb_save_area *vmsa;
+       struct ghcb *ghcb;
 };
 
 struct svm_cpu_data {
@@ -513,5 +517,6 @@ int svm_unregister_enc_region(struct kvm *kvm,
 void pre_sev_run(struct vcpu_svm *svm, int cpu);
 void __init sev_hardware_setup(void);
 void sev_hardware_teardown(void);
+void sev_free_vcpu(struct kvm_vcpu *vcpu);
 
 #endif