KVM: disable uninitialized var warning
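The first hunk below exists only to silence a gcc "may be used uninitialized" false positive: the iterator is only dereferenced by rmap_get_next() after rmap_get_first() has filled it in and returned a non-NULL spte, but the compiler cannot prove that. A rough userspace illustration of the idiom, not part of the patch; the macro definition is an assumption based on the compiler-gcc.h of kernels from this era (approximately "x = x"):

#include <stdio.h>

/* assumed to match the kernel's definition closely enough for the demo */
#define uninitialized_var(x) x = x

static int first_positive(const int *arr, int len, int *out)
{
        int uninitialized_var(val);     /* expands to: int val = val; */
        int i, found = 0;

        for (i = 0; i < len; i++) {
                if (arr[i] > 0) {
                        val = arr[i];
                        found = 1;
                        break;
                }
        }
        if (!found)
                return -1;              /* val is never read on this path */

        *out = val;                     /* gcc may warn here without the macro */
        return 0;
}

int main(void)
{
        int v, a[] = { -3, 7, 2 };

        if (!first_positive(a, 3, &v))
                printf("%d\n", v);
        return 0;
}

The self-assignment gives the variable a (meaningless) initializer as far as the warning pass is concerned, typically without generating any extra code.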
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 72102e0ab7cb3a0ae2302aa10eadd6bdb73d939a..24dd43d45ae403a4af0800b5f65183c675a07290 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1238,11 +1238,12 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                         unsigned long data)
 {
        u64 *sptep;
-       struct rmap_iterator iter;
+       struct rmap_iterator uninitialized_var(iter);
        int young = 0;
 
        /*
-        * Emulate the accessed bit for EPT, by checking if this page has
+        * In the absence of EPT Access and Dirty bits support,
+        * emulate the accessed bit for EPT, by checking if this page has
         * an EPT mapping, and clearing it if it does. On the next access,
         * a new EPT mapping will be established.
         * This has some overhead, but not as much as the cost of swapping
@@ -1253,11 +1254,12 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
        for (sptep = rmap_get_first(*rmapp, &iter); sptep;
             sptep = rmap_get_next(&iter)) {
-               BUG_ON(!(*sptep & PT_PRESENT_MASK));
+               BUG_ON(!is_shadow_present_pte(*sptep));
 
-               if (*sptep & PT_ACCESSED_MASK) {
+               if (*sptep & shadow_accessed_mask) {
                        young = 1;
-                       clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)sptep);
+                       clear_bit((ffs(shadow_accessed_mask) - 1),
+                                (unsigned long *)sptep);
                }
        }
 
@@ -1281,9 +1283,9 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
        for (sptep = rmap_get_first(*rmapp, &iter); sptep;
             sptep = rmap_get_next(&iter)) {
-               BUG_ON(!(*sptep & PT_PRESENT_MASK));
+               BUG_ON(!is_shadow_present_pte(*sptep));
 
-               if (*sptep & PT_ACCESSED_MASK) {
+               if (*sptep & shadow_accessed_mask) {
                        young = 1;
                        break;
                }
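Both rmapp hunks above also stop hard-coding the legacy PTE bits (PT_PRESENT_MASK, PT_ACCESSED_MASK, PT_ACCESSED_SHIFT) and instead go through is_shadow_present_pte() and shadow_accessed_mask, so the same aging code works whether the accessed bit sits at the legacy position or wherever EPT puts it. A minimal userspace sketch of the ffs()-based mask-to-bit-number conversion that feeds clear_bit() (the mask values are examples; ffs() here is the libc one from <strings.h>):

#include <stdio.h>
#include <strings.h>            /* ffs(): 1-based index of least significant set bit */

int main(void)
{
        /* e.g. bit 5 is the accessed bit of a legacy x86 PTE,
         * bit 8 the accessed bit of an EPT entry with A/D support */
        unsigned int masks[] = { 1u << 5, 1u << 8 };
        int i;

        for (i = 0; i < 2; i++) {
                unsigned long spte = ~0UL;      /* pretend every bit is set */
                int bit = ffs(masks[i]) - 1;    /* mask -> 0-based bit number */

                spte &= ~(1UL << bit);          /* what clear_bit(bit, &spte) does */
                printf("mask %#x -> bit %d, spte %#lx\n", masks[i], bit, spte);
        }
        return 0;
}

When shadow_accessed_mask is zero (EPT without A/D support), kvm_age_rmapp() takes the zap-and-refault emulation path described in the comment instead, so the loop above only ever sees a non-zero mask.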
@@ -2595,8 +2597,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
                        *gfnp = gfn;
                        kvm_release_pfn_clean(pfn);
                        pfn &= ~mask;
-                       if (!get_page_unless_zero(pfn_to_page(pfn)))
-                               BUG();
+                       kvm_get_pfn(pfn);
                        *pfnp = pfn;
                }
        }
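The remaining hunks rework mmu_shrink() so it no longer takes every VM's srcu/mmu_lock on each shrinker call: it skips VMs whose MMU is still empty, frees pages from the first populated VM it finds, and rotates that VM to the tail of vm_list so the next invocation picks on a different guest. A standalone sketch of that select-one-and-rotate policy (plain userspace C; struct vm, shrink_one() and the array are invented stand-ins for the real kvm structures and list_move_tail()):

#include <stdio.h>

struct vm {
        const char *name;
        int used_mmu_pages;
};

static void shrink_one(struct vm *v)
{
        /* stand-in for kvm_mmu_remove_some_alloc_mmu_pages() + commit */
        v->used_mmu_pages--;
        printf("shrunk %s, %d pages left\n", v->name, v->used_mmu_pages);
}

int main(void)
{
        /* a tiny "vm_list"; index 0 plays the role of the list head */
        struct vm vms[] = {
                { "vm-a", 0 },          /* just created, MMU not populated yet */
                { "vm-b", 4 },
                { "vm-c", 7 },
        };
        int n = 3, nr_to_scan = 3, i, j;

        for (i = 0; i < n; i++) {
                if (!vms[i].used_mmu_pages) {   /* don't shrink an empty MMU */
                        if (!nr_to_scan--)
                                break;
                        continue;
                }

                shrink_one(&vms[i]);

                /* rotate the shrunk VM to the tail, like list_move_tail() */
                struct vm picked = vms[i];
                for (j = i; j < n - 1; j++)
                        vms[j] = vms[j + 1];
                vms[n - 1] = picked;
                break;
        }

        for (i = 0; i < n; i++)
                printf("%s ", vms[i].name);
        printf("\n");
        return 0;
}

Running it shrinks vm-b and leaves the order vm-a, vm-c, vm-b, which is the fairness property the list_move_tail() at the end of the loop is after.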
@@ -3943,7 +3944,6 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
        struct kvm *kvm;
-       struct kvm *kvm_freed = NULL;
        int nr_to_scan = sc->nr_to_scan;
 
        if (nr_to_scan == 0)
@@ -3955,22 +3955,30 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
                int idx;
                LIST_HEAD(invalid_list);
 
+               /*
+                * n_used_mmu_pages is accessed without holding kvm->mmu_lock
+                * here. We may skip a VM instance erroneously, but we do not
+                * want to shrink a VM that only started to populate its MMU
+                * anyway.
+                */
+               if (!kvm->arch.n_used_mmu_pages) {
+                       if (!nr_to_scan--)
+                               break;
+                       continue;
+               }
+
                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
-               if (!kvm_freed && nr_to_scan > 0 &&
-                   kvm->arch.n_used_mmu_pages > 0) {
-                       kvm_mmu_remove_some_alloc_mmu_pages(kvm,
-                                                           &invalid_list);
-                       kvm_freed = kvm;
-               }
-               nr_to_scan--;
 
+               kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list);
                kvm_mmu_commit_zap_page(kvm, &invalid_list);
+
                spin_unlock(&kvm->mmu_lock);
                srcu_read_unlock(&kvm->srcu, idx);
+
+               list_move_tail(&kvm->vm_list, &vm_list);
+               break;
        }
-       if (kvm_freed)
-               list_move_tail(&kvm_freed->vm_list, &vm_list);
 
        raw_spin_unlock(&kvm_lock);