git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
KVM: SVM: refactor msr permission bitmap allocation
author     Maxim Levitsky <mlevitsk@redhat.com>
           Thu, 27 Aug 2020 17:11:40 +0000 (20:11 +0300)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Mon, 28 Sep 2020 11:57:11 +0000 (07:57 -0400)
Replace svm_vcpu_init_msrpm with svm_vcpu_alloc_msrpm, which also allocates
the msr bitmap, and add svm_vcpu_free_msrpm to free it.

This will be used later to move the nested msr permission bitmap allocation
to nested.c.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20200827171145.374620-4-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
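
For easier reading, here are the two helpers as they stand once this patch is
applied, consolidated from the first hunk of the diff below. MSRPM_ALLOC_ORDER,
direct_access_msrs, MSR_INVALID and set_msr_interception() are pre-existing SVM
definitions that the patch does not touch.

static u32 *svm_vcpu_init_msrpm(void)
{
	int i;
	u32 *msrpm;
	struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);

	if (!pages)
		return NULL;

	msrpm = page_address(pages);
	/* Fill with 0xff: every MSR access starts out intercepted. */
	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	/* Disable interception for the MSRs that are always passed through. */
	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;
		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
	return msrpm;
}

static void svm_vcpu_free_msrpm(u32 *msrpm)
{
	__free_pages(virt_to_page(msrpm), MSRPM_ALLOC_ORDER);
}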
arch/x86/kvm/svm/svm.c

index bd3515f25ce2fca6de947d5185c662d1e17aaab6..535839cdcb567a83dd39bb54671268d5eff3155f 100644
@@ -609,18 +609,29 @@ static void set_msr_interception(u32 *msrpm, unsigned msr,
        msrpm[offset] = tmp;
 }
 
-static void svm_vcpu_init_msrpm(u32 *msrpm)
+static u32 *svm_vcpu_init_msrpm(void)
 {
        int i;
+       u32 *msrpm;
+       struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
+
+       if (!pages)
+               return NULL;
 
+       msrpm = page_address(pages);
        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
 
        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                if (!direct_access_msrs[i].always)
                        continue;
-
                set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
        }
+       return msrpm;
+}
+
+static void svm_vcpu_free_msrpm(u32 *msrpm)
+{
+       __free_pages(virt_to_page(msrpm), MSRPM_ALLOC_ORDER);
 }
 
 static void add_msr_offset(u32 offset)
@@ -1172,9 +1183,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm;
        struct page *vmcb_page;
-       struct page *msrpm_pages;
        struct page *hsave_page;
-       struct page *nested_msrpm_pages;
        int err;
 
        BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
@@ -1185,21 +1194,13 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
        if (!vmcb_page)
                goto out;
 
-       msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
-       if (!msrpm_pages)
-               goto free_page1;
-
-       nested_msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
-       if (!nested_msrpm_pages)
-               goto free_page2;
-
        hsave_page = alloc_page(GFP_KERNEL_ACCOUNT);
        if (!hsave_page)
-               goto free_page3;
+               goto free_page1;
 
        err = avic_init_vcpu(svm);
        if (err)
-               goto free_page4;
+               goto free_page2;
 
        /* We initialize this flag to true to make sure that the is_running
         * bit would be set the first time the vcpu is loaded.
@@ -1210,11 +1211,13 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
        svm->nested.hsave = page_address(hsave_page);
        clear_page(svm->nested.hsave);
 
-       svm->msrpm = page_address(msrpm_pages);
-       svm_vcpu_init_msrpm(svm->msrpm);
+       svm->msrpm = svm_vcpu_init_msrpm();
+       if (!svm->msrpm)
+               goto free_page2;
 
-       svm->nested.msrpm = page_address(nested_msrpm_pages);
-       svm_vcpu_init_msrpm(svm->nested.msrpm);
+       svm->nested.msrpm = svm_vcpu_init_msrpm();
+       if (!svm->nested.msrpm)
+               goto free_page3;
 
        svm->vmcb = page_address(vmcb_page);
        clear_page(svm->vmcb);
@@ -1227,12 +1230,10 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 
        return 0;
 
-free_page4:
-       __free_page(hsave_page);
 free_page3:
-       __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+       svm_vcpu_free_msrpm(svm->msrpm);
 free_page2:
-       __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+       __free_page(hsave_page);
 free_page1:
        __free_page(vmcb_page);
 out:
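
Condensed from the hunks above, the allocation order and error-unwind path in
svm_create_vcpu after this patch look roughly like this (unchanged code
elided):

	if (!vmcb_page)
		goto out;

	hsave_page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!hsave_page)
		goto free_page1;

	err = avic_init_vcpu(svm);
	if (err)
		goto free_page2;

	/* allocate + initialize the L1 and nested MSR permission bitmaps */
	svm->msrpm = svm_vcpu_init_msrpm();
	if (!svm->msrpm)
		goto free_page2;

	svm->nested.msrpm = svm_vcpu_init_msrpm();
	if (!svm->nested.msrpm)
		goto free_page3;

	...

	/* unwind in reverse order of allocation */
free_page3:
	svm_vcpu_free_msrpm(svm->msrpm);
free_page2:
	__free_page(hsave_page);
free_page1:
	__free_page(vmcb_page);
out:
	...

A failed svm->msrpm allocation jumps to free_page2 (hsave_page, then
vmcb_page), while a failed svm->nested.msrpm allocation jumps to free_page3,
which additionally releases svm->msrpm through the new svm_vcpu_free_msrpm
helper, so no allocation is leaked on any error path.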