rebase patches on top of Ubuntu-5.3.0-40.32
[pve-kernel.git] / patches / kernel / 0009-x86-kvm-Cache-gfn-to-pfn-translation.patch
1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
3 Date: Fri, 31 Jan 2020 08:06:42 -0300
4 Subject: [PATCH] x86/kvm: Cache gfn to pfn translation
5
6 CVE-2019-3016
8
9 __kvm_map_gfn()'s call to gfn_to_pfn_memslot() is
10 * relatively expensive
11 * in certain cases (such as when done from atomic context) cannot be called
12
13 Stashing gfn-to-pfn mapping should help with both cases.
14
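The caching policy is small enough to model outside the kernel. Below is a
minimal, self-contained userspace sketch of the lookup rules this patch adds:
the cache fields and the hit/miss/generation checks mirror the patch, while
the typedefs and the slow_translate()/map_gfn() names are stand-ins invented
purely for illustration.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;     /* guest frame number */
    typedef uint64_t kvm_pfn_t; /* host page frame number */

    /* Mirrors struct gfn_to_pfn_cache introduced in kvm_types.h below. */
    struct gfn_to_pfn_cache {
        uint64_t generation;    /* memslot generation the entry was filled at */
        gfn_t gfn;
        kvm_pfn_t pfn;
        bool dirty;
    };

    /* Stand-in for the expensive gfn_to_pfn_memslot() walk. */
    static kvm_pfn_t slow_translate(gfn_t gfn)
    {
        return gfn + 0x1000; /* fake translation, for the demo only */
    }

    /*
     * Lookup rule from __kvm_map_gfn(): reuse the cached pfn only if the gfn
     * matches and the memslot generation is unchanged; on a miss, refill the
     * cache, unless we are in atomic context where the slow path is forbidden.
     */
    static int map_gfn(struct gfn_to_pfn_cache *cache, gfn_t gfn,
                       uint64_t slots_generation, bool atomic, kvm_pfn_t *pfn)
    {
        if (!cache->pfn || cache->gfn != gfn ||
            cache->generation != slots_generation) {
            if (atomic)
                return -1; /* -EAGAIN in the kernel */
            cache->pfn = slow_translate(gfn);
            cache->gfn = gfn;
            cache->dirty = false;
            cache->generation = slots_generation;
        }
        *pfn = cache->pfn;
        return 0;
    }

    int main(void)
    {
        struct gfn_to_pfn_cache cache = { 0 };
        kvm_pfn_t pfn;

        map_gfn(&cache, 42, 1, false, &pfn);        /* miss: takes slow path */
        map_gfn(&cache, 42, 1, true, &pfn);         /* hit: fine even if atomic */
        int r = map_gfn(&cache, 42, 2, true, &pfn); /* generation changed: miss */
        printf("atomic lookup after memslot change: %d\n", r);
        return 0;
    }

This also matches the kvm_arch_memslots_updated() hunk below: after a memslot
change the stored generation no longer matches, so cached entries go stale
automatically, and the vCPU kick merely forces the non-atomic path to refill
them promptly.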
15 This is part of CVE-2019-3016.
16
17 Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
18 Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
19 Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
20 Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
21 ---
22 arch/x86/include/asm/kvm_host.h | 1 +
23 arch/x86/kvm/x86.c | 10 ++++
24 include/linux/kvm_host.h | 7 ++-
25 include/linux/kvm_types.h | 9 ++-
26 virt/kvm/kvm_main.c | 98 ++++++++++++++++++++++++++-------
27 5 files changed, 103 insertions(+), 22 deletions(-)
28
29 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
30 index f68e174f452f..7c06343614a4 100644
31 --- a/arch/x86/include/asm/kvm_host.h
32 +++ b/arch/x86/include/asm/kvm_host.h
33 @@ -678,6 +678,7 @@ struct kvm_vcpu_arch {
34 u64 last_steal;
35 struct gfn_to_hva_cache stime;
36 struct kvm_steal_time steal;
37 + struct gfn_to_pfn_cache cache;
38 } st;
39
40 u64 tsc_offset;
41 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
42 index 80e860bd39d5..cb18560b07bc 100644
43 --- a/arch/x86/kvm/x86.c
44 +++ b/arch/x86/kvm/x86.c
45 @@ -8945,6 +8945,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
46 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
47 {
48 void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
49 + struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
50 +
51 + kvm_release_pfn(cache->pfn, cache->dirty, cache);
52
53 kvmclock_reset(vcpu);
54
55 @@ -9611,11 +9614,18 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
56
57 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
58 {
59 + struct kvm_vcpu *vcpu;
60 + int i;
61 +
62 /*
63 * memslots->generation has been incremented.
64 * mmio generation may have reached its maximum value.
65 */
66 kvm_mmu_invalidate_mmio_sptes(kvm, gen);
67 +
68 + /* Force re-initialization of steal_time cache */
69 + kvm_for_each_vcpu(i, vcpu, kvm)
70 + kvm_vcpu_kick(vcpu);
71 }
72
73 int kvm_arch_prepare_memory_region(struct kvm *kvm,
74 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
75 index df4cc0ead363..abfc2fbde957 100644
76 --- a/include/linux/kvm_host.h
77 +++ b/include/linux/kvm_host.h
78 @@ -728,6 +728,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
79 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
80 void kvm_get_pfn(kvm_pfn_t pfn);
81
82 +void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
83 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
84 int len);
85 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
86 @@ -758,10 +759,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
87 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
88 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
89 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
90 -int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
91 +int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
92 + struct gfn_to_pfn_cache *cache, bool atomic);
93 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
94 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
95 -int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
96 +int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
97 + struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
98 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
99 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
100 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
101 diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
102 index bde5374ae021..2382cb58969d 100644
103 --- a/include/linux/kvm_types.h
104 +++ b/include/linux/kvm_types.h
105 @@ -18,7 +18,7 @@ struct kvm_memslots;
106
107 enum kvm_mr_change;
108
109 -#include <asm/types.h>
110 +#include <linux/types.h>
111
112 /*
113 * Address types:
114 @@ -49,4 +49,11 @@ struct gfn_to_hva_cache {
115 struct kvm_memory_slot *memslot;
116 };
117
118 +struct gfn_to_pfn_cache {
119 + u64 generation;
120 + gfn_t gfn;
121 + kvm_pfn_t pfn;
122 + bool dirty;
123 +};
124 +
125 #endif /* __KVM_TYPES_H__ */
126 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
127 index 6614e030ae75..f05e5b5c30e8 100644
128 --- a/virt/kvm/kvm_main.c
129 +++ b/virt/kvm/kvm_main.c
130 @@ -1792,27 +1792,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
131 }
132 EXPORT_SYMBOL_GPL(gfn_to_page);
133
134 +void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
135 +{
136 + if (pfn == 0)
137 + return;
138 +
139 + if (cache)
140 + cache->pfn = cache->gfn = 0;
141 +
142 + if (dirty)
143 + kvm_release_pfn_dirty(pfn);
144 + else
145 + kvm_release_pfn_clean(pfn);
146 +}
147 +
148 +static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
149 + struct gfn_to_pfn_cache *cache, u64 gen)
150 +{
151 + kvm_release_pfn(cache->pfn, cache->dirty, cache);
152 +
153 + cache->pfn = gfn_to_pfn_memslot(slot, gfn);
154 + cache->gfn = gfn;
155 + cache->dirty = false;
156 + cache->generation = gen;
157 +}
158 +
159 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
160 - struct kvm_host_map *map)
161 + struct kvm_host_map *map,
162 + struct gfn_to_pfn_cache *cache,
163 + bool atomic)
164 {
165 kvm_pfn_t pfn;
166 void *hva = NULL;
167 struct page *page = KVM_UNMAPPED_PAGE;
168 struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
169 + u64 gen = slots->generation;
170
171 if (!map)
172 return -EINVAL;
173
174 - pfn = gfn_to_pfn_memslot(slot, gfn);
175 + if (cache) {
176 + if (!cache->pfn || cache->gfn != gfn ||
177 + cache->generation != gen) {
178 + if (atomic)
179 + return -EAGAIN;
180 + kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
181 + }
182 + pfn = cache->pfn;
183 + } else {
184 + if (atomic)
185 + return -EAGAIN;
186 + pfn = gfn_to_pfn_memslot(slot, gfn);
187 + }
188 if (is_error_noslot_pfn(pfn))
189 return -EINVAL;
190
191 if (pfn_valid(pfn)) {
192 page = pfn_to_page(pfn);
193 - hva = kmap(page);
194 + if (atomic)
195 + hva = kmap_atomic(page);
196 + else
197 + hva = kmap(page);
198 #ifdef CONFIG_HAS_IOMEM
199 - } else {
200 + } else if (!atomic) {
201 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
202 + } else {
203 + return -EINVAL;
204 #endif
205 }
206
207 @@ -1827,20 +1872,25 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
208 return 0;
209 }
210
211 -int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
212 +int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
213 + struct gfn_to_pfn_cache *cache, bool atomic)
214 {
215 - return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
216 + return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
217 + cache, atomic);
218 }
219 EXPORT_SYMBOL_GPL(kvm_map_gfn);
220
221 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
222 {
223 - return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
224 + return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
225 + NULL, false);
226 }
227 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
228
229 static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
230 - struct kvm_host_map *map, bool dirty)
231 + struct kvm_host_map *map,
232 + struct gfn_to_pfn_cache *cache,
233 + bool dirty, bool atomic)
234 {
235 if (!map)
236 return;
237 @@ -1848,34 +1898,44 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
238 if (!map->hva)
239 return;
240
241 - if (map->page != KVM_UNMAPPED_PAGE)
242 - kunmap(map->page);
243 + if (map->page != KVM_UNMAPPED_PAGE) {
244 + if (atomic)
245 + kunmap_atomic(map->hva);
246 + else
247 + kunmap(map->page);
248 + }
249 #ifdef CONFIG_HAS_IOMEM
250 - else
251 + else if (!atomic)
252 memunmap(map->hva);
253 + else
254 + WARN_ONCE(1, "Unexpected unmapping in atomic context");
255 #endif
256
257 - if (dirty) {
258 + if (dirty)
259 mark_page_dirty_in_slot(memslot, map->gfn);
260 - kvm_release_pfn_dirty(map->pfn);
261 - } else {
262 - kvm_release_pfn_clean(map->pfn);
263 - }
264 +
265 + if (cache)
266 + cache->dirty |= dirty;
267 + else
268 + kvm_release_pfn(map->pfn, dirty, NULL);
269
270 map->hva = NULL;
271 map->page = NULL;
272 }
273
274 -int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
275 +int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
276 + struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
277 {
278 - __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
279 + __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
280 + cache, dirty, atomic);
281 return 0;
282 }
283 EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
284
285 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
286 {
287 - __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
288 + __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
289 + dirty, false);
290 }
291 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
292
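
For orientation only, here is a hypothetical caller sketch (not part of this
patch and not compilable on its own) of the call pattern the cache-aware
kvm_map_gfn()/kvm_unmap_gfn() pair is designed for, using the per-vCPU
st.cache field added above. The steal-time code itself is converted to this
API in a later patch of the series; the helper name and the gpa parameter
below are invented for illustration.

    /* Hypothetical user of the new API; assumes vCPU (non-atomic) context. */
    static void touch_steal_time(struct kvm_vcpu *vcpu, gpa_t gpa)
    {
        struct kvm_host_map map;

        /* atomic=false: a cache miss may take the slow gfn_to_pfn path */
        if (kvm_map_gfn(vcpu, gpa >> PAGE_SHIFT, &map,
                        &vcpu->arch.st.cache, false))
            return;

        /* ... read and update the mapped guest page through map.hva ... */

        /* dirty=true: mark the page dirty; the pfn itself stays cached */
        kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
    }

Because the cache keeps holding the pfn between calls, the final release
happens in kvm_arch_vcpu_free() via kvm_release_pfn(), as the x86.c hunk
above shows.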