KVM: x86: Move allocation of pio_data page down a few lines
Author:     Sean Christopherson <sean.j.christopherson@intel.com>
AuthorDate: Wed, 18 Dec 2019 21:54:54 +0000 (13:54 -0800)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Fri, 24 Jan 2020 08:18:57 +0000 (09:18 +0100)
Allocate the pio_data page after creating the MMU and local APIC so that
all direct memory allocations are grouped together.  This allows setting
the return value to -ENOMEM prior to starting the allocations instead of
setting it in the fail path for every allocation.

The pio_data page is only consumed when KVM_RUN is invoked, i.e. moving
its allocation has no real functional impact.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
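
[Annotation, not part of the commit: the cleanup relies on a common kernel
idiom: assign the shared error code once before a run of allocations, and
unwind them in reverse order through goto labels.  A minimal userspace
sketch of the idiom follows; it is illustrative only, the names are
hypothetical, and calloc() stands in for alloc_page()/kzalloc().]

#include <stdlib.h>
#include <errno.h>

struct vcpu_demo {
	void *pio_data;
	void *mce_banks;
	void *fpu_state;
};

static int vcpu_demo_init(struct vcpu_demo *v)
{
	int r;

	/* Non-allocating setup would happen here. */

	r = -ENOMEM;	/* set once; every allocation below fails with it */

	v->pio_data = calloc(1, 4096);
	if (!v->pio_data)
		goto fail;

	v->mce_banks = calloc(32, sizeof(unsigned long));
	if (!v->mce_banks)
		goto fail_free_pio_data;

	v->fpu_state = calloc(1, 512);
	if (!v->fpu_state)
		goto fail_free_mce_banks;

	return 0;

	/* Unwind in reverse order of allocation. */
fail_free_mce_banks:
	free(v->mce_banks);
fail_free_pio_data:
	free(v->pio_data);
fail:
	return r;
}

[Because every failure in the grouped block returns the same -ENOMEM, the
repeated "r = -ENOMEM;" statements in the individual error branches become
dead weight, which is exactly what the hunks below delete.]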
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 29d058db3207ca5fec8204a1b66db3e375df6aa6..50110bca7d57e40732b5367d9d98ead53dcc305c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9510,18 +9510,11 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        else
                vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
 
-       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-       if (!page) {
-               r = -ENOMEM;
-               goto fail;
-       }
-       vcpu->arch.pio_data = page_address(page);
-
        kvm_set_tsc_khz(vcpu, max_tsc_khz);
 
        r = kvm_mmu_create(vcpu);
        if (r < 0)
-               goto fail_free_pio_data;
+               return r;
 
        if (irqchip_in_kernel(vcpu->kvm)) {
                vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu->kvm);
@@ -9531,25 +9524,27 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        } else
                static_key_slow_inc(&kvm_no_apic_vcpu);
 
+       r = -ENOMEM;
+
+       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (!page)
+               goto fail_free_lapic;
+       vcpu->arch.pio_data = page_address(page);
+
        vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
                                       GFP_KERNEL_ACCOUNT);
-       if (!vcpu->arch.mce_banks) {
-               r = -ENOMEM;
-               goto fail_free_lapic;
-       }
+       if (!vcpu->arch.mce_banks)
+               goto fail_free_pio_data;
        vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
 
        if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
-                               GFP_KERNEL_ACCOUNT)) {
-               r = -ENOMEM;
+                               GFP_KERNEL_ACCOUNT))
                goto fail_free_mce_banks;
-       }
 
        vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
                                                GFP_KERNEL_ACCOUNT);
        if (!vcpu->arch.user_fpu) {
                pr_err("kvm: failed to allocate userspace's fpu\n");
-               r = -ENOMEM;
                goto free_wbinvd_dirty_mask;
        }
 
@@ -9557,7 +9552,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                                                 GFP_KERNEL_ACCOUNT);
        if (!vcpu->arch.guest_fpu) {
                pr_err("kvm: failed to allocate vcpu's fpu\n");
-               r = -ENOMEM;
                goto free_user_fpu;
        }
        fx_init(vcpu);
@@ -9584,13 +9578,12 @@ free_wbinvd_dirty_mask:
        free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
 fail_free_mce_banks:
        kfree(vcpu->arch.mce_banks);
+fail_free_pio_data:
+       free_page((unsigned long)vcpu->arch.pio_data);
 fail_free_lapic:
        kvm_free_lapic(vcpu);
 fail_mmu_destroy:
        kvm_mmu_destroy(vcpu);
-fail_free_pio_data:
-       free_page((unsigned long)vcpu->arch.pio_data);
-fail:
        return r;
 }