x86/kvm: Cache gfn to pfn translation
author    Boris Ostrovsky <boris.ostrovsky@oracle.com>
Thu, 5 Dec 2019 01:30:51 +0000 (01:30 +0000)
committer Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Fri, 14 Feb 2020 14:45:44 +0000 (15:45 +0100)
CVE-2019-3016

__kvm_map_gfn()'s call to gfn_to_pfn_memslot() is
* relatively expensive
* not allowed in certain cases (such as when called from atomic context)

Stashing the gfn-to-pfn mapping should help with both cases.

This is part of CVE-2019-3016.
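
For reference, a minimal sketch (not part of this patch) of how a
steal-time style caller is expected to consume the cached mapping; the
gpa variable below is illustrative only:

	struct kvm_host_map map;

	/* Non-atomic path: a cache miss may refill the cache (can sleep). */
	if (kvm_map_gfn(vcpu, gpa >> PAGE_SHIFT, &map,
			&vcpu->arch.st.cache, false))
		return;

	/* ... update the guest structure through map.hva ... */

	/* Mark the page dirty; the pfn stays stashed in the cache. */
	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);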

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 917248144db5d7320655dbb41d3af0b8a0f3d589)
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Acked-by: Sultan Alsawaf <sultan.alsawaf@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c
include/linux/kvm_host.h
include/linux/kvm_types.h
virt/kvm/kvm_main.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f68e174f452f879fb066e38b45f538aed74e3a00..7c06343614a486519cfd05dd72065ec33ad188bf 100644
@@ -678,6 +678,7 @@ struct kvm_vcpu_arch {
                u64 last_steal;
                struct gfn_to_hva_cache stime;
                struct kvm_steal_time steal;
+               struct gfn_to_pfn_cache cache;
        } st;
 
        u64 tsc_offset;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 80e860bd39d5afd38a321ae670a7952a55ceadf8..cb18560b07bc6dd644c9267a39807ac11c007b50 100644
@@ -8945,6 +8945,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
        void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+       struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
+
+       kvm_release_pfn(cache->pfn, cache->dirty, cache);
 
        kvmclock_reset(vcpu);
 
@@ -9611,11 +9614,18 @@ out_free:
 
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
+       struct kvm_vcpu *vcpu;
+       int i;
+
        /*
         * memslots->generation has been incremented.
         * mmio generation may have reached its maximum value.
         */
        kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+
+       /* Force re-initialization of steal_time cache */
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               kvm_vcpu_kick(vcpu);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index df4cc0ead363715a986511c0ed830f97016e38f2..abfc2fbde957c35e8c2bd1b393ca4d95b3b98d8b 100644
@@ -728,6 +728,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 void kvm_get_pfn(kvm_pfn_t pfn);
 
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -758,10 +759,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+               struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+                 struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index bde5374ae021e63d0378831ae719c55ab342e655..2382cb58969d70106e32187f058ed005654e34fb 100644
@@ -18,7 +18,7 @@ struct kvm_memslots;
 
 enum kvm_mr_change;
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 /*
  * Address types:
@@ -49,4 +49,11 @@ struct gfn_to_hva_cache {
        struct kvm_memory_slot *memslot;
 };
 
+struct gfn_to_pfn_cache {
+       u64 generation;
+       gfn_t gfn;
+       kvm_pfn_t pfn;
+       bool dirty;
+};
+
 #endif /* __KVM_TYPES_H__ */
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ca08942c78464d2c94214080c252ae81590146dc..669475b59456155175e5be1d91423c18677f4e30 100644
@@ -1792,27 +1792,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+{
+       if (pfn == 0)
+               return;
+
+       if (cache)
+               cache->pfn = cache->gfn = 0;
+
+       if (dirty)
+               kvm_release_pfn_dirty(pfn);
+       else
+               kvm_release_pfn_clean(pfn);
+}
+
+static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
+                                struct gfn_to_pfn_cache *cache, u64 gen)
+{
+       kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
+       cache->pfn = gfn_to_pfn_memslot(slot, gfn);
+       cache->gfn = gfn;
+       cache->dirty = false;
+       cache->generation = gen;
+}
+
 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
-                        struct kvm_host_map *map)
+                        struct kvm_host_map *map,
+                        struct gfn_to_pfn_cache *cache,
+                        bool atomic)
 {
        kvm_pfn_t pfn;
        void *hva = NULL;
        struct page *page = KVM_UNMAPPED_PAGE;
        struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+       u64 gen = slots->generation;
 
        if (!map)
                return -EINVAL;
 
-       pfn = gfn_to_pfn_memslot(slot, gfn);
+       if (cache) {
+               if (!cache->pfn || cache->gfn != gfn ||
+                       cache->generation != gen) {
+                       if (atomic)
+                               return -EAGAIN;
+                       kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
+               }
+               pfn = cache->pfn;
+       } else {
+               if (atomic)
+                       return -EAGAIN;
+               pfn = gfn_to_pfn_memslot(slot, gfn);
+       }
        if (is_error_noslot_pfn(pfn))
                return -EINVAL;
 
        if (pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
-               hva = kmap(page);
+               if (atomic)
+                       hva = kmap_atomic(page);
+               else
+                       hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-       } else {
+       } else if (!atomic) {
                hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+       } else {
+               return -EINVAL;
 #endif
        }
 
@@ -1827,20 +1872,25 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
        return 0;
 }
 
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+               struct gfn_to_pfn_cache *cache, bool atomic)
 {
-       return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
+       return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
+                       cache, atomic);
 }
 EXPORT_SYMBOL_GPL(kvm_map_gfn);
 
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-       return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
+       return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
+               NULL, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
 static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
-                       struct kvm_host_map *map, bool dirty)
+                       struct kvm_host_map *map,
+                       struct gfn_to_pfn_cache *cache,
+                       bool dirty, bool atomic)
 {
        if (!map)
                return;
@@ -1848,34 +1898,44 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
        if (!map->hva)
                return;
 
-       if (map->page != KVM_UNMAPPED_PAGE)
-               kunmap(map->page);
+       if (map->page != KVM_UNMAPPED_PAGE) {
+               if (atomic)
+                       kunmap_atomic(map->hva);
+               else
+                       kunmap(map->page);
+       }
 #ifdef CONFIG_HAS_IOMEM
-       else
+       else if (!atomic)
                memunmap(map->hva);
+       else
+               WARN_ONCE(1, "Unexpected unmapping in atomic context");
 #endif
 
-       if (dirty) {
+       if (dirty)
                mark_page_dirty_in_slot(memslot, map->gfn);
-               kvm_release_pfn_dirty(map->pfn);
-       } else {
-               kvm_release_pfn_clean(map->pfn);
-       }
+
+       if (cache)
+               cache->dirty |= dirty;
+       else
+               kvm_release_pfn(map->pfn, dirty, NULL);
 
        map->hva = NULL;
        map->page = NULL;
 }
 
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, 
+                 struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
 {
-       __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
+       __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
+                       cache, dirty, atomic);
        return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
 
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
-       __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
+       __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
+                       dirty, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
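
For completeness, a hedged sketch of the atomic-context usage that the
new "atomic" flag enables (e.g. from a preemption notifier); treating
-EAGAIN as "skip the update" is an assumed caller policy, not something
established by this patch, and gpa is again illustrative:

	struct kvm_host_map map;

	/* atomic == true: kmap_atomic() is used, and a cold or stale cache
	 * returns -EAGAIN rather than calling gfn_to_pfn_memslot(), which
	 * may sleep.
	 */
	if (kvm_map_gfn(vcpu, gpa >> PAGE_SHIFT, &map,
			&vcpu->arch.st.cache, true))
		return;

	/* ... touch map.hva ... */

	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);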