git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
KVM: x86/mmu: Explicitly track only a single invalid mmu generation
author: Sean Christopherson <sean.j.christopherson@intel.com>
Fri, 13 Sep 2019 02:46:11 +0000 (19:46 -0700)
committer: Paolo Bonzini <pbonzini@redhat.com>
Tue, 24 Sep 2019 12:36:00 +0000 (14:36 +0200)
Toggle mmu_valid_gen between '0' and '1' instead of blindly incrementing
the generation.  Because slots_lock is held for the entire duration of
zapping obsolete pages, it's impossible for there to be multiple invalid
generations associated with shadow pages at any given time.

Toggling between the two generations (valid vs. invalid) allows changing
mmu_valid_gen from an unsigned long to a u8, which reduces the size of
struct kvm_mmu_page from 160 to 152 bytes on 64-bit KVM, i.e. reduces
KVM's memory footprint by 8 bytes per shadow page.

Set sp->mmu_valid_gen before it is added to active_mmu_pages.
Functionally this has no effect as kvm_mmu_alloc_page() has a single
caller that sets sp->mmu_valid_gen soon thereafter, but visually it is
jarring to see a shadow page being added to the list without its
mmu_valid_gen first being set.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/mmutrace.h

index 53e0b956ed3c3edfd14a3eacf42a91c4c7e843a9..c5ed92482e2c9c9cdf71159fd70d25c4e5d4c301 100644 (file)
@@ -320,6 +320,7 @@ struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;
        bool unsync;
+       u8 mmu_valid_gen;
        bool mmio_cached;
 
        /*
@@ -335,7 +336,6 @@ struct kvm_mmu_page {
        int root_count;          /* Currently serving as active root */
        unsigned int unsync_children;
        struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
-       unsigned long mmu_valid_gen;
        DECLARE_BITMAP(unsync_child_bitmap, 512);
 
 #ifdef CONFIG_X86_32
@@ -859,7 +859,7 @@ struct kvm_arch {
        unsigned long n_requested_mmu_pages;
        unsigned long n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
-       unsigned long mmu_valid_gen;
+       u8 mmu_valid_gen;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
         * Hash table of struct kvm_mmu_page.
index e552fd5cd33d2c4eb06037502faa19e19eb02f2b..5f086400036032dbba6de5c2597e32aaeb06b3b1 100644 (file)
@@ -2101,6 +2101,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
         * depends on valid pages being added to the head of the list.  See
         * comments in kvm_zap_obsolete_pages().
         */
+       sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
        kvm_mod_used_mmu_pages(vcpu->kvm, +1);
        return sp;
@@ -2537,7 +2538,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                if (level > PT_PAGE_TABLE_LEVEL && need_sync)
                        flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
        }
-       sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
        clear_page(sp->spt);
        trace_kvm_mmu_get_page(sp, true);
 
@@ -5738,9 +5738,19 @@ restart:
  */
 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
 {
+       lockdep_assert_held(&kvm->slots_lock);
+
        spin_lock(&kvm->mmu_lock);
        trace_kvm_mmu_zap_all_fast(kvm);
-       kvm->arch.mmu_valid_gen++;
+
+       /*
+        * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
+        * held for the entire duration of zapping obsolete pages, it's
+        * impossible for there to be multiple invalid generations associated
+        * with *valid* shadow pages at any given time, i.e. there is exactly
+        * one valid generation and (at most) one invalid generation.
+        */
+       kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
 
        /*
         * Notify all vcpus to reload its shadow page table and flush TLB.
index 1a063ba76281ed4e8fa42b401bdd5586e2ae6609..7ca8831c7d1a201fb289363a5e75222f3021a5f5 100644 (file)
@@ -8,11 +8,11 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvmmmu
 
-#define KVM_MMU_PAGE_FIELDS                    \
-       __field(unsigned long, mmu_valid_gen)   \
-       __field(__u64, gfn)                     \
-       __field(__u32, role)                    \
-       __field(__u32, root_count)              \
+#define KVM_MMU_PAGE_FIELDS            \
+       __field(__u8, mmu_valid_gen)    \
+       __field(__u64, gfn)             \
+       __field(__u32, role)            \
+       __field(__u32, root_count)      \
        __field(bool, unsync)
 
 #define KVM_MMU_PAGE_ASSIGN(sp)                                \
@@ -31,7 +31,7 @@
                                                                        \
        role.word = __entry->role;                                      \
                                                                        \
-       trace_seq_printf(p, "sp gen %lx gfn %llx l%u %u-byte q%u%s %s%s"\
+       trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s" \
                         " %snxe %sad root %u %s%c",                    \
                         __entry->mmu_valid_gen,                        \
                         __entry->gfn, role.level,                      \
@@ -288,7 +288,7 @@ TRACE_EVENT(
        TP_ARGS(kvm),
 
        TP_STRUCT__entry(
-               __field(unsigned long, mmu_valid_gen)
+               __field(__u8, mmu_valid_gen)
                __field(unsigned int, mmu_used_pages)
        ),
 
@@ -297,7 +297,7 @@ TRACE_EVENT(
                __entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
        ),
 
-       TP_printk("kvm-mmu-valid-gen %lx used_pages %x",
+       TP_printk("kvm-mmu-valid-gen %u used_pages %x",
                  __entry->mmu_valid_gen, __entry->mmu_used_pages
        )
 );