KVM: replace x86 kvm n_free_mmu_pages with n_used_mmu_pages
author     Dave Hansen <dave@linux.vnet.ibm.com>
           Fri, 20 Aug 2010 01:11:28 +0000 (18:11 -0700)
committer  Avi Kivity <avi@redhat.com>
           Sun, 24 Oct 2010 08:51:18 +0000 (10:51 +0200)
Doing this makes the code much more readable.  That's
borne out by the fact that this patch removes code.  "used"
also happens to be the number we need to return to the slab
code when our shrinker gets called.  Keeping "used" rather
than "free" makes the next patch simpler.
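
As a rough sketch of that point (the real shrinker hook is the "next
patch" referred to above, and the helper name here is made up purely
for illustration), reporting the count becomes a one-liner:

    static unsigned long mmu_used_page_count(struct kvm *kvm)
    {
            /* exactly the number the slab shrinker wants reported */
            return kvm->arch.n_used_mmu_pages;
    }

With a 'free' counter this would instead be n_max_mmu_pages minus
n_free_mmu_pages, which is only meaningful once the maximum has been
set up.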

So, 'struct kvm' is kzalloc()'d, and 'struct kvm_arch' is a
structure member (not a pointer) of 'struct kvm'.  That means
its fields start out zeroed.  I _think_ they get initialized
properly by kvm_mmu_change_mmu_pages(), but that only happens
via kvm ioctls.

Another benefit of storing 'used' instead of 'free' is that
the value is consistent from the moment the structure is
allocated: there is never a negative "used" value.
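
A stand-alone model of the new accounting (plain userspace C, not
kernel code; the field and helper names just mirror the patch) shows
both points: a zeroed structure is already consistent, and "available"
falls out as max minus used:

    #include <assert.h>

    struct kvm_arch_model {
            unsigned int n_used_mmu_pages;
            unsigned int n_max_mmu_pages;
    };

    static unsigned int available_pages(const struct kvm_arch_model *a)
    {
            return a->n_max_mmu_pages - a->n_used_mmu_pages;
    }

    int main(void)
    {
            struct kvm_arch_model a = {0};   /* mimics kzalloc(): all zero */

            assert(a.n_used_mmu_pages == 0); /* correct before any ioctl runs */

            a.n_max_mmu_pages = 64;          /* the ioctl path sets the limit */
            a.n_used_mmu_pages++;            /* as in kvm_mmu_alloc_page() */
            assert(available_pages(&a) == 63);

            a.n_used_mmu_pages--;            /* as in kvm_mmu_free_page() */
            assert(available_pages(&a) == 64);
            return 0;
    }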

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Tim Pepper <lnxninja@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 02963684cd282348f5171838daabff747d435fe7..e01b7282556480bf3cdc2f09a0b72f2d4acf6929 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -367,7 +367,7 @@ struct kvm_vcpu_arch {
 };
 
 struct kvm_arch {
-       unsigned int n_free_mmu_pages;
+       unsigned int n_used_mmu_pages;
        unsigned int n_requested_mmu_pages;
        unsigned int n_max_mmu_pages;
        atomic_t invlpg_counter;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6979e7d1464e3c72e7cf68dfe6883f06b6dd6e83..ff39b85d7a4da80d8d2d9fc0342c78054d7bdc3c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -980,7 +980,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        if (!sp->role.direct)
                __free_page(virt_to_page(sp->gfns));
        kmem_cache_free(mmu_page_header_cache, sp);
-       ++kvm->arch.n_free_mmu_pages;
+       --kvm->arch.n_used_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -1003,7 +1003,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
        bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
        sp->multimapped = 0;
        sp->parent_pte = parent_pte;
-       --vcpu->kvm->arch.n_free_mmu_pages;
+       ++vcpu->kvm->arch.n_used_mmu_pages;
        return sp;
 }
 
@@ -1689,41 +1689,32 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
 /*
  * Changing the number of mmu pages allocated to the vm
- * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
+ * Note: if goal_nr_mmu_pages is too small, you will get dead lock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 {
-       int used_pages;
        LIST_HEAD(invalid_list);
-
-       used_pages = kvm->arch.n_max_mmu_pages - kvm_mmu_available_pages(kvm);
-       used_pages = max(0, used_pages);
-
        /*
         * If we set the number of mmu pages to be smaller be than the
         * number of actived pages , we must to free some mmu pages before we
         * change the value
         */
 
-       if (used_pages > kvm_nr_mmu_pages) {
-               while (used_pages > kvm_nr_mmu_pages &&
+       if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
+               while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
                        !list_empty(&kvm->arch.active_mmu_pages)) {
                        struct kvm_mmu_page *page;
 
                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
-                       used_pages -= kvm_mmu_prepare_zap_page(kvm, page,
+                       kvm_mmu_prepare_zap_page(kvm, page,
                                                               &invalid_list);
                }
                kvm_mmu_commit_zap_page(kvm, &invalid_list);
-               kvm_nr_mmu_pages = used_pages;
-               kvm->arch.n_free_mmu_pages = 0;
+               goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
        }
-       else
-               kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
-                                        - kvm->arch.n_max_mmu_pages;
 
-       kvm->arch.n_max_mmu_pages = kvm_nr_mmu_pages;
+       kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index c3a689ae7df03bdb09d535e993ee4bb215529edc..f05a03dfba4e47758628691b39f1570f0bd6cb52 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -52,7 +52,8 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
-       return kvm->arch.n_free_mmu_pages;
+       return kvm->arch.n_max_mmu_pages -
+               kvm->arch.n_used_mmu_pages;
 }
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
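
For context, a hedged sketch of how a caller can consume the reworked
helper; kvm_mmu_free_some_pages()'s body is outside this hunk, so the
exact shape and the KVM_MIN_FREE_MMU_PAGES threshold are assumptions
about the surrounding code rather than part of this patch:

    static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
    {
            /*
             * A single subtraction via kvm_mmu_available_pages();
             * no separate free-page counter to keep in sync.
             */
            if (unlikely(kvm_mmu_available_pages(vcpu->kvm) < KVM_MIN_FREE_MMU_PAGES))
                    __kvm_mmu_free_some_pages(vcpu);
    }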