From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Date: Fri, 31 Jan 2020 08:06:41 -0300
Subject: [PATCH] x86/kvm: Introduce kvm_(un)map_gfn()

CVE-2019-3016
CVE-2020-3016

kvm_vcpu_(un)map operates on gfns from any current address space.
In certain cases we want to make sure we are not mapping SMRAM
and for that we can use kvm_(un)map_gfn() that we are introducing
in this patch.

This is part of CVE-2019-3016.

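As an illustration (not part of the original commit message): a caller that must
never touch SMRAM could use the new pair roughly as in the hypothetical sketch
below. Only kvm_map_gfn(), kvm_unmap_gfn() and struct kvm_host_map come from this
patch; example_zero_guest_bytes(), gpa and len are made-up names, and the accessed
range is assumed to stay within one page.

#include <linux/kvm_host.h>
#include <linux/mm.h>

/*
 * Hypothetical caller sketch: map @gpa through the regular memslots
 * (address space 0, so never SMRAM), zero @len bytes on the host side,
 * then unmap and mark the page dirty.
 */
static int example_zero_guest_bytes(struct kvm_vcpu *vcpu, gpa_t gpa,
				    unsigned int len)
{
	struct kvm_host_map map;

	if (kvm_map_gfn(vcpu, gpa >> PAGE_SHIFT, &map))
		return -EFAULT;

	memset(map.hva + offset_in_page(gpa), 0, len);

	/* dirty == true: the page is logged dirty and the pfn released dirty */
	kvm_unmap_gfn(vcpu, &map, true);
	return 0;
}
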
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 include/linux/kvm_host.h |  2 ++
 virt/kvm/kvm_main.c      | 29 ++++++++++++++++++++++++-----
 2 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d41c521a39da..df4cc0ead363 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -758,8 +758,10 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 91e56a9b0661..6614e030ae75 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1792,12 +1792,13 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
+static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
			 struct kvm_host_map *map)
 {
	kvm_pfn_t pfn;
	void *hva = NULL;
	struct page *page = KVM_UNMAPPED_PAGE;
+	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
 
	if (!map)
		return -EINVAL;
@@ -1826,14 +1827,20 @@ static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
	return 0;
 }
 
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+{
+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
+}
+EXPORT_SYMBOL_GPL(kvm_map_gfn);
+
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-	return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		    bool dirty)
+static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+			    struct kvm_host_map *map, bool dirty)
 {
	if (!map)
		return;
@@ -1849,7 +1856,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 #endif
 
	if (dirty) {
-		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
+		mark_page_dirty_in_slot(memslot, map->gfn);
		kvm_release_pfn_dirty(map->pfn);
	} else {
		kvm_release_pfn_clean(map->pfn);
@@ -1858,6 +1865,18 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
	map->hva = NULL;
	map->page = NULL;
 }
+
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
+
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
+}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
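
For context, not part of the patch itself: after this change the only difference
between kvm_map_gfn() and kvm_vcpu_map() is which memslot array the gfn is
resolved against. The sketch below spells that out; example_slots_for() and
allow_smram are made-up names, while kvm_memslots() and kvm_vcpu_memslots() are
the existing helpers declared in include/linux/kvm_host.h.

#include <linux/kvm_host.h>

/* Illustrative only: which memslots each entry point resolves the gfn against. */
static struct kvm_memslots *example_slots_for(struct kvm_vcpu *vcpu,
					      bool allow_smram)
{
	if (allow_smram)
		/* kvm_vcpu_map() path: the vCPU's current address space,
		 * i.e. the SMRAM address space while the vCPU is in SMM on x86.
		 */
		return kvm_vcpu_memslots(vcpu);

	/* kvm_map_gfn() path: address space 0 (regular guest memory),
	 * regardless of the vCPU's SMM state.
	 */
	return kvm_memslots(vcpu->kvm);
}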