Commit | Line | Data |
---|---|---|
749cf76c CD |
1 | /* |
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License, version 2, as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | |
17 | */ | |
342cd0ab CD |
18 | |
19 | #include <linux/mman.h> | |
20 | #include <linux/kvm_host.h> | |
21 | #include <linux/io.h> | |
ad361f09 | 22 | #include <linux/hugetlb.h> |
196f878a | 23 | #include <linux/sched/signal.h> |
45e96ea6 | 24 | #include <trace/events/kvm.h> |
342cd0ab | 25 | #include <asm/pgalloc.h> |
94f8e641 | 26 | #include <asm/cacheflush.h> |
342cd0ab CD |
27 | #include <asm/kvm_arm.h> |
28 | #include <asm/kvm_mmu.h> | |
45e96ea6 | 29 | #include <asm/kvm_mmio.h> |
d5d8184d | 30 | #include <asm/kvm_asm.h> |
94f8e641 | 31 | #include <asm/kvm_emulate.h> |
1e947bad | 32 | #include <asm/virt.h> |
621f48e4 | 33 | #include <asm/system_misc.h> |
d5d8184d CD |
34 | |
35 | #include "trace.h" | |
342cd0ab | 36 | |
5a677ce0 | 37 | static pgd_t *boot_hyp_pgd; |
2fb41059 | 38 | static pgd_t *hyp_pgd; |
e4c5a685 | 39 | static pgd_t *merged_hyp_pgd; |
342cd0ab CD |
40 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); |
41 | ||
5a677ce0 MZ |
42 | static unsigned long hyp_idmap_start; |
43 | static unsigned long hyp_idmap_end; | |
44 | static phys_addr_t hyp_idmap_vector; | |
45 | ||
9163ee23 | 46 | #define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t)) |
38f791a4 | 47 | #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) |
5d4e08c4 | 48 | |
15a49a44 MS |
49 | #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0) |
50 | #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1) | |
51 | ||
52 | static bool memslot_is_logging(struct kvm_memory_slot *memslot) | |
53 | { | |
15a49a44 | 54 | return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); |
7276030a MS |
55 | } |
56 | ||
57 | /** | |
58 | * kvm_flush_remote_tlbs() - flush all VM TLB entries (ARMv7 and ARMv8) | |
59 | * @kvm: pointer to kvm structure. | |
60 | * | |
61 | * Interface to HYP function to flush all VM TLB entries | |
62 | */ | |
63 | void kvm_flush_remote_tlbs(struct kvm *kvm) | |
64 | { | |
65 | kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); | |
15a49a44 | 66 | } |
ad361f09 | 67 | |
48762767 | 68 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) |
d5d8184d | 69 | { |
8684e701 | 70 | kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); |
d5d8184d CD |
71 | } |
72 | ||
363ef89f MZ |
73 | /* |
74 | * D-Cache management functions. They take the page table entries by | |
75 | * value, as they are flushing the cache using the kernel mapping (or | |
76 | * kmap on 32bit). | |
77 | */ | |
78 | static void kvm_flush_dcache_pte(pte_t pte) | |
79 | { | |
80 | __kvm_flush_dcache_pte(pte); | |
81 | } | |
82 | ||
83 | static void kvm_flush_dcache_pmd(pmd_t pmd) | |
84 | { | |
85 | __kvm_flush_dcache_pmd(pmd); | |
86 | } | |
87 | ||
88 | static void kvm_flush_dcache_pud(pud_t pud) | |
89 | { | |
90 | __kvm_flush_dcache_pud(pud); | |
91 | } | |
92 | ||
e6fab544 AB |
93 | static bool kvm_is_device_pfn(unsigned long pfn) |
94 | { | |
95 | return !pfn_valid(pfn); | |
96 | } | |
97 | ||
15a49a44 MS |
98 | /** |
99 | * stage2_dissolve_pmd() - clear and flush huge PMD entry | |
100 | * @kvm: pointer to kvm structure. | |
101 | * @addr: IPA | |
102 | * @pmd: pmd pointer for IPA | |
103 | * | |
104 | * Clears a PMD entry and flushes the 1st and 2nd stage TLBs for @addr. Marks all | |
105 | * pages in the range dirty. | |
106 | */ | |
107 | static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd) | |
108 | { | |
bbb3b6b3 | 109 | if (!pmd_thp_or_huge(*pmd)) |
15a49a44 MS |
110 | return; |
111 | ||
112 | pmd_clear(pmd); | |
113 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
114 | put_page(virt_to_page(pmd)); | |
115 | } | |
116 | ||
d5d8184d CD |
117 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, |
118 | int min, int max) | |
119 | { | |
120 | void *page; | |
121 | ||
122 | BUG_ON(max > KVM_NR_MEM_OBJS); | |
123 | if (cache->nobjs >= min) | |
124 | return 0; | |
125 | while (cache->nobjs < max) { | |
126 | page = (void *)__get_free_page(PGALLOC_GFP); | |
127 | if (!page) | |
128 | return -ENOMEM; | |
129 | cache->objects[cache->nobjs++] = page; | |
130 | } | |
131 | return 0; | |
132 | } | |
133 | ||
134 | static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) | |
135 | { | |
136 | while (mc->nobjs) | |
137 | free_page((unsigned long)mc->objects[--mc->nobjs]); | |
138 | } | |
139 | ||
140 | static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) | |
141 | { | |
142 | void *p; | |
143 | ||
144 | BUG_ON(!mc || !mc->nobjs); | |
145 | p = mc->objects[--mc->nobjs]; | |
146 | return p; | |
147 | } | |
148 | ||
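These cache helpers follow the usual KVM pattern: the per-vCPU cache is topped up in a context that may sleep, and allocations made under `kvm->mmu_lock` are then served from the pre-filled cache so they never sleep or fail under the spinlock. A minimal caller-side sketch of that pattern (the wrapper name is illustrative only; `stage2_set_pte()` is defined further down in this file):

```c
/*
 * Sketch only: top up the cache before taking mmu_lock, then let the
 * stage-2 table code draw page-table pages from it under the lock.
 */
static int example_map_one_page(struct kvm *kvm,
				struct kvm_mmu_memory_cache *cache,
				phys_addr_t ipa, pte_t pte)
{
	int ret;

	/* May sleep, so it must run before the spinlock is taken. */
	ret = mmu_topup_memory_cache(cache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	spin_lock(&kvm->mmu_lock);
	ret = stage2_set_pte(kvm, cache, ipa, &pte, 0);
	spin_unlock(&kvm->mmu_lock);

	return ret;
}
```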
7a1c831e | 149 | static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr) |
979acd5e | 150 | { |
7a1c831e SP |
151 | pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL); |
152 | stage2_pgd_clear(pgd); | |
4f853a71 | 153 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
7a1c831e | 154 | stage2_pud_free(pud_table); |
4f853a71 | 155 | put_page(virt_to_page(pgd)); |
979acd5e MZ |
156 | } |
157 | ||
7a1c831e | 158 | static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) |
342cd0ab | 159 | { |
7a1c831e SP |
160 | pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0); |
161 | VM_BUG_ON(stage2_pud_huge(*pud)); | |
162 | stage2_pud_clear(pud); | |
4f853a71 | 163 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
7a1c831e | 164 | stage2_pmd_free(pmd_table); |
4f728276 MZ |
165 | put_page(virt_to_page(pud)); |
166 | } | |
342cd0ab | 167 | |
7a1c831e | 168 | static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) |
4f728276 | 169 | { |
4f853a71 | 170 | pte_t *pte_table = pte_offset_kernel(pmd, 0); |
bbb3b6b3 | 171 | VM_BUG_ON(pmd_thp_or_huge(*pmd)); |
4f853a71 CD |
172 | pmd_clear(pmd); |
173 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
174 | pte_free_kernel(NULL, pte_table); | |
4f728276 MZ |
175 | put_page(virt_to_page(pmd)); |
176 | } | |
177 | ||
363ef89f MZ |
178 | /* |
179 | * Unmapping vs dcache management: | |
180 | * | |
181 | * If a guest maps certain memory pages as uncached, all writes will | |
182 | * bypass the data cache and go directly to RAM. However, the CPUs | |
183 | * can still speculate reads (not writes) and fill cache lines with | |
184 | * data. | |
185 | * | |
186 | * Those cache lines will be *clean* cache lines though, so a | |
187 | * clean+invalidate operation is equivalent to an invalidate | |
188 | * operation, because no cache lines are marked dirty. | |
189 | * | |
190 | * Those clean cache lines could be filled prior to an uncached write | |
191 | * by the guest, and the cache coherent IO subsystem would therefore | |
192 | * end up writing old data to disk. | |
193 | * | |
194 | * This is why right after unmapping a page/section and invalidating | |
195 | * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure | |
196 | * the IO subsystem will never hit in the cache. | |
197 | */ | |
7a1c831e | 198 | static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd, |
4f853a71 | 199 | phys_addr_t addr, phys_addr_t end) |
4f728276 | 200 | { |
4f853a71 CD |
201 | phys_addr_t start_addr = addr; |
202 | pte_t *pte, *start_pte; | |
203 | ||
204 | start_pte = pte = pte_offset_kernel(pmd, addr); | |
205 | do { | |
206 | if (!pte_none(*pte)) { | |
363ef89f MZ |
207 | pte_t old_pte = *pte; |
208 | ||
4f853a71 | 209 | kvm_set_pte(pte, __pte(0)); |
4f853a71 | 210 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
363ef89f MZ |
211 | |
212 | /* No need to invalidate the cache for device mappings */ | |
0de58f85 | 213 | if (!kvm_is_device_pfn(pte_pfn(old_pte))) |
363ef89f MZ |
214 | kvm_flush_dcache_pte(old_pte); |
215 | ||
216 | put_page(virt_to_page(pte)); | |
4f853a71 CD |
217 | } |
218 | } while (pte++, addr += PAGE_SIZE, addr != end); | |
219 | ||
7a1c831e SP |
220 | if (stage2_pte_table_empty(start_pte)) |
221 | clear_stage2_pmd_entry(kvm, pmd, start_addr); | |
342cd0ab CD |
222 | } |
223 | ||
7a1c831e | 224 | static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud, |
4f853a71 | 225 | phys_addr_t addr, phys_addr_t end) |
000d3996 | 226 | { |
4f853a71 CD |
227 | phys_addr_t next, start_addr = addr; |
228 | pmd_t *pmd, *start_pmd; | |
000d3996 | 229 | |
7a1c831e | 230 | start_pmd = pmd = stage2_pmd_offset(pud, addr); |
4f853a71 | 231 | do { |
7a1c831e | 232 | next = stage2_pmd_addr_end(addr, end); |
4f853a71 | 233 | if (!pmd_none(*pmd)) { |
bbb3b6b3 | 234 | if (pmd_thp_or_huge(*pmd)) { |
363ef89f MZ |
235 | pmd_t old_pmd = *pmd; |
236 | ||
4f853a71 CD |
237 | pmd_clear(pmd); |
238 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
363ef89f MZ |
239 | |
240 | kvm_flush_dcache_pmd(old_pmd); | |
241 | ||
4f853a71 CD |
242 | put_page(virt_to_page(pmd)); |
243 | } else { | |
7a1c831e | 244 | unmap_stage2_ptes(kvm, pmd, addr, next); |
4f853a71 | 245 | } |
ad361f09 | 246 | } |
4f853a71 | 247 | } while (pmd++, addr = next, addr != end); |
ad361f09 | 248 | |
7a1c831e SP |
249 | if (stage2_pmd_table_empty(start_pmd)) |
250 | clear_stage2_pud_entry(kvm, pud, start_addr); | |
4f853a71 | 251 | } |
000d3996 | 252 | |
7a1c831e | 253 | static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd, |
4f853a71 CD |
254 | phys_addr_t addr, phys_addr_t end) |
255 | { | |
256 | phys_addr_t next, start_addr = addr; | |
257 | pud_t *pud, *start_pud; | |
4f728276 | 258 | |
7a1c831e | 259 | start_pud = pud = stage2_pud_offset(pgd, addr); |
4f853a71 | 260 | do { |
7a1c831e SP |
261 | next = stage2_pud_addr_end(addr, end); |
262 | if (!stage2_pud_none(*pud)) { | |
263 | if (stage2_pud_huge(*pud)) { | |
363ef89f MZ |
264 | pud_t old_pud = *pud; |
265 | ||
7a1c831e | 266 | stage2_pud_clear(pud); |
4f853a71 | 267 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
363ef89f | 268 | kvm_flush_dcache_pud(old_pud); |
4f853a71 CD |
269 | put_page(virt_to_page(pud)); |
270 | } else { | |
7a1c831e | 271 | unmap_stage2_pmds(kvm, pud, addr, next); |
4f728276 MZ |
272 | } |
273 | } | |
4f853a71 | 274 | } while (pud++, addr = next, addr != end); |
4f728276 | 275 | |
7a1c831e SP |
276 | if (stage2_pud_table_empty(start_pud)) |
277 | clear_stage2_pgd_entry(kvm, pgd, start_addr); | |
4f853a71 CD |
278 | } |
279 | ||
7a1c831e SP |
280 | /** |
281 | * unmap_stage2_range -- Clear stage2 page table entries to unmap a range | |
282 | * @kvm: The VM pointer | |
283 | * @start: The intermediate physical base address of the range to unmap | |
284 | * @size: The size of the area to unmap | |
285 | * | |
286 | * Clear a range of stage-2 mappings, lowering the various ref-counts. Must | |
287 | * be called while holding mmu_lock (unless for freeing the stage2 pgd before | |
288 | * destroying the VM), otherwise another faulting VCPU may come in and mess | |
289 | * with things behind our backs. | |
290 | */ | |
291 | static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) | |
4f853a71 CD |
292 | { |
293 | pgd_t *pgd; | |
294 | phys_addr_t addr = start, end = start + size; | |
295 | phys_addr_t next; | |
296 | ||
8b3405e3 | 297 | assert_spin_locked(&kvm->mmu_lock); |
7a1c831e | 298 | pgd = kvm->arch.pgd + stage2_pgd_index(addr); |
4f853a71 | 299 | do { |
0c428a6a SP |
300 | /* |
301 | * Make sure the page table is still active, as another thread | |
302 | * could have possibly freed the page table, while we released | |
303 | * the lock. | |
304 | */ | |
305 | if (!READ_ONCE(kvm->arch.pgd)) | |
306 | break; | |
7a1c831e SP |
307 | next = stage2_pgd_addr_end(addr, end); |
308 | if (!stage2_pgd_none(*pgd)) | |
309 | unmap_stage2_puds(kvm, pgd, addr, next); | |
8b3405e3 SP |
310 | /* |
311 | * If the range is too large, release the kvm->mmu_lock | |
312 | * to prevent starvation and lockup detector warnings. | |
313 | */ | |
314 | if (next != end) | |
315 | cond_resched_lock(&kvm->mmu_lock); | |
4f853a71 | 316 | } while (pgd++, addr = next, addr != end); |
000d3996 MZ |
317 | } |
318 | ||
9d218a1f MZ |
319 | static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, |
320 | phys_addr_t addr, phys_addr_t end) | |
321 | { | |
322 | pte_t *pte; | |
323 | ||
324 | pte = pte_offset_kernel(pmd, addr); | |
325 | do { | |
0de58f85 | 326 | if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte))) |
363ef89f | 327 | kvm_flush_dcache_pte(*pte); |
9d218a1f MZ |
328 | } while (pte++, addr += PAGE_SIZE, addr != end); |
329 | } | |
330 | ||
331 | static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, | |
332 | phys_addr_t addr, phys_addr_t end) | |
333 | { | |
334 | pmd_t *pmd; | |
335 | phys_addr_t next; | |
336 | ||
70fd1906 | 337 | pmd = stage2_pmd_offset(pud, addr); |
9d218a1f | 338 | do { |
70fd1906 | 339 | next = stage2_pmd_addr_end(addr, end); |
9d218a1f | 340 | if (!pmd_none(*pmd)) { |
bbb3b6b3 | 341 | if (pmd_thp_or_huge(*pmd)) |
363ef89f MZ |
342 | kvm_flush_dcache_pmd(*pmd); |
343 | else | |
9d218a1f | 344 | stage2_flush_ptes(kvm, pmd, addr, next); |
9d218a1f MZ |
345 | } |
346 | } while (pmd++, addr = next, addr != end); | |
347 | } | |
348 | ||
349 | static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd, | |
350 | phys_addr_t addr, phys_addr_t end) | |
351 | { | |
352 | pud_t *pud; | |
353 | phys_addr_t next; | |
354 | ||
70fd1906 | 355 | pud = stage2_pud_offset(pgd, addr); |
9d218a1f | 356 | do { |
70fd1906 SP |
357 | next = stage2_pud_addr_end(addr, end); |
358 | if (!stage2_pud_none(*pud)) { | |
359 | if (stage2_pud_huge(*pud)) | |
363ef89f MZ |
360 | kvm_flush_dcache_pud(*pud); |
361 | else | |
9d218a1f | 362 | stage2_flush_pmds(kvm, pud, addr, next); |
9d218a1f MZ |
363 | } |
364 | } while (pud++, addr = next, addr != end); | |
365 | } | |
366 | ||
367 | static void stage2_flush_memslot(struct kvm *kvm, | |
368 | struct kvm_memory_slot *memslot) | |
369 | { | |
370 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; | |
371 | phys_addr_t end = addr + PAGE_SIZE * memslot->npages; | |
372 | phys_addr_t next; | |
373 | pgd_t *pgd; | |
374 | ||
70fd1906 | 375 | pgd = kvm->arch.pgd + stage2_pgd_index(addr); |
9d218a1f | 376 | do { |
70fd1906 | 377 | next = stage2_pgd_addr_end(addr, end); |
9d218a1f MZ |
378 | stage2_flush_puds(kvm, pgd, addr, next); |
379 | } while (pgd++, addr = next, addr != end); | |
380 | } | |
381 | ||
382 | /** | |
383 | * stage2_flush_vm - Invalidate cache for pages mapped in stage 2 | |
384 | * @kvm: The struct kvm pointer | |
385 | * | |
386 | * Go through the stage 2 page tables and invalidate any cache lines | |
387 | * backing memory already mapped to the VM. | |
388 | */ | |
3c1e7165 | 389 | static void stage2_flush_vm(struct kvm *kvm) |
9d218a1f MZ |
390 | { |
391 | struct kvm_memslots *slots; | |
392 | struct kvm_memory_slot *memslot; | |
393 | int idx; | |
394 | ||
395 | idx = srcu_read_lock(&kvm->srcu); | |
396 | spin_lock(&kvm->mmu_lock); | |
397 | ||
398 | slots = kvm_memslots(kvm); | |
399 | kvm_for_each_memslot(memslot, slots) | |
400 | stage2_flush_memslot(kvm, memslot); | |
401 | ||
402 | spin_unlock(&kvm->mmu_lock); | |
403 | srcu_read_unlock(&kvm->srcu, idx); | |
404 | } | |
405 | ||
64f32497 SP |
406 | static void clear_hyp_pgd_entry(pgd_t *pgd) |
407 | { | |
408 | pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL); | |
409 | pgd_clear(pgd); | |
410 | pud_free(NULL, pud_table); | |
411 | put_page(virt_to_page(pgd)); | |
412 | } | |
413 | ||
414 | static void clear_hyp_pud_entry(pud_t *pud) | |
415 | { | |
416 | pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0); | |
417 | VM_BUG_ON(pud_huge(*pud)); | |
418 | pud_clear(pud); | |
419 | pmd_free(NULL, pmd_table); | |
420 | put_page(virt_to_page(pud)); | |
421 | } | |
422 | ||
423 | static void clear_hyp_pmd_entry(pmd_t *pmd) | |
424 | { | |
425 | pte_t *pte_table = pte_offset_kernel(pmd, 0); | |
426 | VM_BUG_ON(pmd_thp_or_huge(*pmd)); | |
427 | pmd_clear(pmd); | |
428 | pte_free_kernel(NULL, pte_table); | |
429 | put_page(virt_to_page(pmd)); | |
430 | } | |
431 | ||
432 | static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) | |
433 | { | |
434 | pte_t *pte, *start_pte; | |
435 | ||
436 | start_pte = pte = pte_offset_kernel(pmd, addr); | |
437 | do { | |
438 | if (!pte_none(*pte)) { | |
439 | kvm_set_pte(pte, __pte(0)); | |
440 | put_page(virt_to_page(pte)); | |
441 | } | |
442 | } while (pte++, addr += PAGE_SIZE, addr != end); | |
443 | ||
444 | if (hyp_pte_table_empty(start_pte)) | |
445 | clear_hyp_pmd_entry(pmd); | |
446 | } | |
447 | ||
448 | static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end) | |
449 | { | |
450 | phys_addr_t next; | |
451 | pmd_t *pmd, *start_pmd; | |
452 | ||
453 | start_pmd = pmd = pmd_offset(pud, addr); | |
454 | do { | |
455 | next = pmd_addr_end(addr, end); | |
456 | /* Hyp doesn't use huge pmds */ | |
457 | if (!pmd_none(*pmd)) | |
458 | unmap_hyp_ptes(pmd, addr, next); | |
459 | } while (pmd++, addr = next, addr != end); | |
460 | ||
461 | if (hyp_pmd_table_empty(start_pmd)) | |
462 | clear_hyp_pud_entry(pud); | |
463 | } | |
464 | ||
465 | static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end) | |
466 | { | |
467 | phys_addr_t next; | |
468 | pud_t *pud, *start_pud; | |
469 | ||
470 | start_pud = pud = pud_offset(pgd, addr); | |
471 | do { | |
472 | next = pud_addr_end(addr, end); | |
473 | /* Hyp doesn't use huge puds */ | |
474 | if (!pud_none(*pud)) | |
475 | unmap_hyp_pmds(pud, addr, next); | |
476 | } while (pud++, addr = next, addr != end); | |
477 | ||
478 | if (hyp_pud_table_empty(start_pud)) | |
479 | clear_hyp_pgd_entry(pgd); | |
480 | } | |
481 | ||
482 | static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size) | |
483 | { | |
484 | pgd_t *pgd; | |
485 | phys_addr_t addr = start, end = start + size; | |
486 | phys_addr_t next; | |
487 | ||
488 | /* | |
489 | * We don't unmap anything from HYP, except at the hyp tear down. | |
490 | * Hence, we don't have to invalidate the TLBs here. | |
491 | */ | |
492 | pgd = pgdp + pgd_index(addr); | |
493 | do { | |
494 | next = pgd_addr_end(addr, end); | |
495 | if (!pgd_none(*pgd)) | |
496 | unmap_hyp_puds(pgd, addr, next); | |
497 | } while (pgd++, addr = next, addr != end); | |
498 | } | |
499 | ||
342cd0ab | 500 | /** |
4f728276 | 501 | * free_hyp_pgds - free Hyp-mode page tables |
342cd0ab | 502 | * |
5a677ce0 MZ |
503 | * Assumes hyp_pgd is a page table used strictly in Hyp-mode and |
504 | * therefore contains either mappings in the kernel memory area (above | |
505 | * PAGE_OFFSET), or device mappings in the vmalloc range (from | |
506 | * VMALLOC_START to VMALLOC_END). | |
507 | * | |
508 | * boot_hyp_pgd should only map two pages for the init code. | |
342cd0ab | 509 | */ |
4f728276 | 510 | void free_hyp_pgds(void) |
342cd0ab | 511 | { |
342cd0ab CD |
512 | unsigned long addr; |
513 | ||
d157f4a5 | 514 | mutex_lock(&kvm_hyp_pgd_mutex); |
5a677ce0 | 515 | |
26781f9c MZ |
516 | if (boot_hyp_pgd) { |
517 | unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); | |
518 | free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order); | |
519 | boot_hyp_pgd = NULL; | |
520 | } | |
521 | ||
4f728276 | 522 | if (hyp_pgd) { |
26781f9c | 523 | unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE); |
4f728276 | 524 | for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) |
6c41a413 | 525 | unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE); |
4f728276 | 526 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) |
6c41a413 | 527 | unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE); |
d4cb9df5 | 528 | |
38f791a4 | 529 | free_pages((unsigned long)hyp_pgd, hyp_pgd_order); |
d157f4a5 | 530 | hyp_pgd = NULL; |
4f728276 | 531 | } |
e4c5a685 AB |
532 | if (merged_hyp_pgd) { |
533 | clear_page(merged_hyp_pgd); | |
534 | free_page((unsigned long)merged_hyp_pgd); | |
535 | merged_hyp_pgd = NULL; | |
536 | } | |
4f728276 | 537 | |
342cd0ab CD |
538 | mutex_unlock(&kvm_hyp_pgd_mutex); |
539 | } | |
540 | ||
541 | static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, | |
6060df84 MZ |
542 | unsigned long end, unsigned long pfn, |
543 | pgprot_t prot) | |
342cd0ab CD |
544 | { |
545 | pte_t *pte; | |
546 | unsigned long addr; | |
342cd0ab | 547 | |
3562c76d MZ |
548 | addr = start; |
549 | do { | |
6060df84 MZ |
550 | pte = pte_offset_kernel(pmd, addr); |
551 | kvm_set_pte(pte, pfn_pte(pfn, prot)); | |
4f728276 | 552 | get_page(virt_to_page(pte)); |
5a677ce0 | 553 | kvm_flush_dcache_to_poc(pte, sizeof(*pte)); |
6060df84 | 554 | pfn++; |
3562c76d | 555 | } while (addr += PAGE_SIZE, addr != end); |
342cd0ab CD |
556 | } |
557 | ||
558 | static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, | |
6060df84 MZ |
559 | unsigned long end, unsigned long pfn, |
560 | pgprot_t prot) | |
342cd0ab CD |
561 | { |
562 | pmd_t *pmd; | |
563 | pte_t *pte; | |
564 | unsigned long addr, next; | |
565 | ||
3562c76d MZ |
566 | addr = start; |
567 | do { | |
6060df84 | 568 | pmd = pmd_offset(pud, addr); |
342cd0ab CD |
569 | |
570 | BUG_ON(pmd_sect(*pmd)); | |
571 | ||
572 | if (pmd_none(*pmd)) { | |
6060df84 | 573 | pte = pte_alloc_one_kernel(NULL, addr); |
342cd0ab CD |
574 | if (!pte) { |
575 | kvm_err("Cannot allocate Hyp pte\n"); | |
576 | return -ENOMEM; | |
577 | } | |
578 | pmd_populate_kernel(NULL, pmd, pte); | |
4f728276 | 579 | get_page(virt_to_page(pmd)); |
5a677ce0 | 580 | kvm_flush_dcache_to_poc(pmd, sizeof(*pmd)); |
342cd0ab CD |
581 | } |
582 | ||
583 | next = pmd_addr_end(addr, end); | |
584 | ||
6060df84 MZ |
585 | create_hyp_pte_mappings(pmd, addr, next, pfn, prot); |
586 | pfn += (next - addr) >> PAGE_SHIFT; | |
3562c76d | 587 | } while (addr = next, addr != end); |
342cd0ab CD |
588 | |
589 | return 0; | |
590 | } | |
591 | ||
38f791a4 CD |
592 | static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start, |
593 | unsigned long end, unsigned long pfn, | |
594 | pgprot_t prot) | |
595 | { | |
596 | pud_t *pud; | |
597 | pmd_t *pmd; | |
598 | unsigned long addr, next; | |
599 | int ret; | |
600 | ||
601 | addr = start; | |
602 | do { | |
603 | pud = pud_offset(pgd, addr); | |
604 | ||
605 | if (pud_none_or_clear_bad(pud)) { | |
606 | pmd = pmd_alloc_one(NULL, addr); | |
607 | if (!pmd) { | |
608 | kvm_err("Cannot allocate Hyp pmd\n"); | |
609 | return -ENOMEM; | |
610 | } | |
611 | pud_populate(NULL, pud, pmd); | |
612 | get_page(virt_to_page(pud)); | |
613 | kvm_flush_dcache_to_poc(pud, sizeof(*pud)); | |
614 | } | |
615 | ||
616 | next = pud_addr_end(addr, end); | |
617 | ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot); | |
618 | if (ret) | |
619 | return ret; | |
620 | pfn += (next - addr) >> PAGE_SHIFT; | |
621 | } while (addr = next, addr != end); | |
622 | ||
623 | return 0; | |
624 | } | |
625 | ||
6060df84 MZ |
626 | static int __create_hyp_mappings(pgd_t *pgdp, |
627 | unsigned long start, unsigned long end, | |
628 | unsigned long pfn, pgprot_t prot) | |
342cd0ab | 629 | { |
342cd0ab CD |
630 | pgd_t *pgd; |
631 | pud_t *pud; | |
342cd0ab CD |
632 | unsigned long addr, next; |
633 | int err = 0; | |
634 | ||
342cd0ab | 635 | mutex_lock(&kvm_hyp_pgd_mutex); |
3562c76d MZ |
636 | addr = start & PAGE_MASK; |
637 | end = PAGE_ALIGN(end); | |
638 | do { | |
6060df84 | 639 | pgd = pgdp + pgd_index(addr); |
342cd0ab | 640 | |
38f791a4 CD |
641 | if (pgd_none(*pgd)) { |
642 | pud = pud_alloc_one(NULL, addr); | |
643 | if (!pud) { | |
644 | kvm_err("Cannot allocate Hyp pud\n"); | |
342cd0ab CD |
645 | err = -ENOMEM; |
646 | goto out; | |
647 | } | |
38f791a4 CD |
648 | pgd_populate(NULL, pgd, pud); |
649 | get_page(virt_to_page(pgd)); | |
650 | kvm_flush_dcache_to_poc(pgd, sizeof(*pgd)); | |
342cd0ab CD |
651 | } |
652 | ||
653 | next = pgd_addr_end(addr, end); | |
38f791a4 | 654 | err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot); |
342cd0ab CD |
655 | if (err) |
656 | goto out; | |
6060df84 | 657 | pfn += (next - addr) >> PAGE_SHIFT; |
3562c76d | 658 | } while (addr = next, addr != end); |
342cd0ab CD |
659 | out: |
660 | mutex_unlock(&kvm_hyp_pgd_mutex); | |
661 | return err; | |
662 | } | |
663 | ||
40c2729b CD |
664 | static phys_addr_t kvm_kaddr_to_phys(void *kaddr) |
665 | { | |
666 | if (!is_vmalloc_addr(kaddr)) { | |
667 | BUG_ON(!virt_addr_valid(kaddr)); | |
668 | return __pa(kaddr); | |
669 | } else { | |
670 | return page_to_phys(vmalloc_to_page(kaddr)) + | |
671 | offset_in_page(kaddr); | |
672 | } | |
673 | } | |
674 | ||
342cd0ab | 675 | /** |
06e8c3b0 | 676 | * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode |
342cd0ab CD |
677 | * @from: The virtual kernel start address of the range |
678 | * @to: The virtual kernel end address of the range (exclusive) | |
c8dddecd | 679 | * @prot: The protection to be applied to this range |
342cd0ab | 680 | * |
06e8c3b0 MZ |
681 | * The same virtual address as the kernel virtual address is also used |
682 | * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying | |
683 | * physical pages. | |
342cd0ab | 684 | */ |
c8dddecd | 685 | int create_hyp_mappings(void *from, void *to, pgprot_t prot) |
342cd0ab | 686 | { |
40c2729b CD |
687 | phys_addr_t phys_addr; |
688 | unsigned long virt_addr; | |
6c41a413 MZ |
689 | unsigned long start = kern_hyp_va((unsigned long)from); |
690 | unsigned long end = kern_hyp_va((unsigned long)to); | |
6060df84 | 691 | |
1e947bad MZ |
692 | if (is_kernel_in_hyp_mode()) |
693 | return 0; | |
694 | ||
40c2729b CD |
695 | start = start & PAGE_MASK; |
696 | end = PAGE_ALIGN(end); | |
6060df84 | 697 | |
40c2729b CD |
698 | for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) { |
699 | int err; | |
6060df84 | 700 | |
40c2729b CD |
701 | phys_addr = kvm_kaddr_to_phys(from + virt_addr - start); |
702 | err = __create_hyp_mappings(hyp_pgd, virt_addr, | |
703 | virt_addr + PAGE_SIZE, | |
704 | __phys_to_pfn(phys_addr), | |
c8dddecd | 705 | prot); |
40c2729b CD |
706 | if (err) |
707 | return err; | |
708 | } | |
709 | ||
710 | return 0; | |
342cd0ab CD |
711 | } |
712 | ||
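For context, this is the routine the KVM/ARM init code uses to shadow kernel ranges (Hyp text, rodata, per-CPU data) into the Hyp page tables. The sketch below only illustrates the calling convention; the real call sites and their error handling live in the arch init code, not in this file:

```c
/* Sketch only: map the Hyp text section executable at its kern_hyp_va alias. */
static int example_map_hyp_text(void)
{
	int err;

	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
				  kvm_ksym_ref(__hyp_text_end),
				  PAGE_HYP_EXEC);
	if (err)
		kvm_err("Cannot map hyp text section\n");
	return err;
}
```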
713 | /** | |
06e8c3b0 MZ |
714 | * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode |
715 | * @from: The kernel start VA of the range | |
716 | * @to: The kernel end VA of the range (exclusive) | |
6060df84 | 717 | * @phys_addr: The physical start address which gets mapped |
06e8c3b0 MZ |
718 | * |
719 | * The resulting HYP VA is the same as the kernel VA, modulo | |
720 | * HYP_PAGE_OFFSET. | |
342cd0ab | 721 | */ |
6060df84 | 722 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) |
342cd0ab | 723 | { |
6c41a413 MZ |
724 | unsigned long start = kern_hyp_va((unsigned long)from); |
725 | unsigned long end = kern_hyp_va((unsigned long)to); | |
6060df84 | 726 | |
1e947bad MZ |
727 | if (is_kernel_in_hyp_mode()) |
728 | return 0; | |
729 | ||
6060df84 MZ |
730 | /* Check for a valid kernel IO mapping */ |
731 | if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)) | |
732 | return -EINVAL; | |
733 | ||
734 | return __create_hyp_mappings(hyp_pgd, start, end, | |
735 | __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); | |
342cd0ab CD |
736 | } |
737 | ||
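The callers here are device setup paths that already hold an ioremap()'d kernel VA for a device control region; a hedged sketch of such a call, with placeholder names:

```c
/*
 * Sketch only: expose an already-ioremap()'d control region to Hyp.
 * 'vctrl_base', 'vctrl_phys' and 'vctrl_size' are placeholder names.
 */
static int example_map_vctrl(void *vctrl_base, phys_addr_t vctrl_phys,
			     unsigned long vctrl_size)
{
	return create_hyp_io_mappings(vctrl_base, vctrl_base + vctrl_size,
				      vctrl_phys);
}
```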
d5d8184d CD |
738 | /** |
739 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. | |
740 | * @kvm: The KVM struct pointer for the VM. | |
741 | * | |
9d4dc688 VM |
742 | * Allocates only the stage-2 HW PGD level table(s) (can support either full |
743 | * 40-bit input addresses or limited to 32-bit input addresses). Clears the | |
744 | * allocated pages. | |
d5d8184d CD |
745 | * |
746 | * Note we don't need locking here as this is only called when the VM is | |
747 | * created, which can only be done once. | |
748 | */ | |
749 | int kvm_alloc_stage2_pgd(struct kvm *kvm) | |
750 | { | |
751 | pgd_t *pgd; | |
752 | ||
753 | if (kvm->arch.pgd != NULL) { | |
754 | kvm_err("kvm_arch already initialized?\n"); | |
755 | return -EINVAL; | |
756 | } | |
757 | ||
9163ee23 SP |
758 | /* Allocate the HW PGD, making sure that each page gets its own refcount */ |
759 | pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO); | |
760 | if (!pgd) | |
a987370f MZ |
761 | return -ENOMEM; |
762 | ||
d5d8184d | 763 | kvm->arch.pgd = pgd; |
d5d8184d CD |
764 | return 0; |
765 | } | |
766 | ||
957db105 CD |
767 | static void stage2_unmap_memslot(struct kvm *kvm, |
768 | struct kvm_memory_slot *memslot) | |
769 | { | |
770 | hva_t hva = memslot->userspace_addr; | |
771 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; | |
772 | phys_addr_t size = PAGE_SIZE * memslot->npages; | |
773 | hva_t reg_end = hva + size; | |
774 | ||
775 | /* | |
776 | * A memory region could potentially cover multiple VMAs, and any holes | |
777 | * between them, so iterate over all of them to find out if we should | |
778 | * unmap any of them. | |
779 | * | |
780 | * +--------------------------------------------+ | |
781 | * +---------------+----------------+ +----------------+ | |
782 | * | : VMA 1 | VMA 2 | | VMA 3 : | | |
783 | * +---------------+----------------+ +----------------+ | |
784 | * | memory region | | |
785 | * +--------------------------------------------+ | |
786 | */ | |
787 | do { | |
788 | struct vm_area_struct *vma = find_vma(current->mm, hva); | |
789 | hva_t vm_start, vm_end; | |
790 | ||
791 | if (!vma || vma->vm_start >= reg_end) | |
792 | break; | |
793 | ||
794 | /* | |
795 | * Take the intersection of this VMA with the memory region | |
796 | */ | |
797 | vm_start = max(hva, vma->vm_start); | |
798 | vm_end = min(reg_end, vma->vm_end); | |
799 | ||
800 | if (!(vma->vm_flags & VM_PFNMAP)) { | |
801 | gpa_t gpa = addr + (vm_start - memslot->userspace_addr); | |
802 | unmap_stage2_range(kvm, gpa, vm_end - vm_start); | |
803 | } | |
804 | hva = vm_end; | |
805 | } while (hva < reg_end); | |
806 | } | |
807 | ||
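A worked example of the hva-to-gpa arithmetic in the loop above, with hypothetical values:

```c
/*
 * Hypothetical values:
 *   memslot->userspace_addr          = 0x7f0000000000   (hva base)
 *   memslot->base_gfn << PAGE_SHIFT  = 0x80000000       (IPA base)
 * A VMA intersecting the slot at [0x7f0000200000, 0x7f0000400000) yields:
 *   gpa  = 0x80000000 + (0x7f0000200000 - 0x7f0000000000) = 0x80200000
 *   size = 0x200000
 * so the loop calls unmap_stage2_range(kvm, 0x80200000, 0x200000) for it.
 */
```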
808 | /** | |
809 | * stage2_unmap_vm - Unmap Stage-2 RAM mappings | |
810 | * @kvm: The struct kvm pointer | |
811 | * | |
812 | * Go through the memregions and unmap any regular RAM | |
813 | * backing memory already mapped to the VM. | |
814 | */ | |
815 | void stage2_unmap_vm(struct kvm *kvm) | |
816 | { | |
817 | struct kvm_memslots *slots; | |
818 | struct kvm_memory_slot *memslot; | |
819 | int idx; | |
820 | ||
821 | idx = srcu_read_lock(&kvm->srcu); | |
90f6e150 | 822 | down_read(¤t->mm->mmap_sem); |
957db105 CD |
823 | spin_lock(&kvm->mmu_lock); |
824 | ||
825 | slots = kvm_memslots(kvm); | |
826 | kvm_for_each_memslot(memslot, slots) | |
827 | stage2_unmap_memslot(kvm, memslot); | |
828 | ||
829 | spin_unlock(&kvm->mmu_lock); | |
90f6e150 | 830 | up_read(¤t->mm->mmap_sem); |
957db105 CD |
831 | srcu_read_unlock(&kvm->srcu, idx); |
832 | } | |
833 | ||
d5d8184d CD |
834 | /** |
835 | * kvm_free_stage2_pgd - free all stage-2 tables | |
836 | * @kvm: The KVM struct pointer for the VM. | |
837 | * | |
838 | * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all | |
839 | * underlying level-2 and level-3 tables before freeing the actual level-1 table | |
840 | * and setting the struct pointer to NULL. | |
d5d8184d CD |
841 | */ |
842 | void kvm_free_stage2_pgd(struct kvm *kvm) | |
843 | { | |
6c0d706b | 844 | void *pgd = NULL; |
d5d8184d | 845 | |
8b3405e3 | 846 | spin_lock(&kvm->mmu_lock); |
6c0d706b SP |
847 | if (kvm->arch.pgd) { |
848 | unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); | |
2952a607 | 849 | pgd = READ_ONCE(kvm->arch.pgd); |
6c0d706b SP |
850 | kvm->arch.pgd = NULL; |
851 | } | |
8b3405e3 SP |
852 | spin_unlock(&kvm->mmu_lock); |
853 | ||
9163ee23 | 854 | /* Free the HW pgd, one page at a time */ |
6c0d706b SP |
855 | if (pgd) |
856 | free_pages_exact(pgd, S2_PGD_SIZE); | |
d5d8184d CD |
857 | } |
858 | ||
38f791a4 | 859 | static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, |
ad361f09 | 860 | phys_addr_t addr) |
d5d8184d CD |
861 | { |
862 | pgd_t *pgd; | |
863 | pud_t *pud; | |
d5d8184d | 864 | |
70fd1906 SP |
865 | pgd = kvm->arch.pgd + stage2_pgd_index(addr); |
866 | if (WARN_ON(stage2_pgd_none(*pgd))) { | |
38f791a4 CD |
867 | if (!cache) |
868 | return NULL; | |
869 | pud = mmu_memory_cache_alloc(cache); | |
70fd1906 | 870 | stage2_pgd_populate(pgd, pud); |
38f791a4 CD |
871 | get_page(virt_to_page(pgd)); |
872 | } | |
873 | ||
70fd1906 | 874 | return stage2_pud_offset(pgd, addr); |
38f791a4 CD |
875 | } |
876 | ||
877 | static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |
878 | phys_addr_t addr) | |
879 | { | |
880 | pud_t *pud; | |
881 | pmd_t *pmd; | |
882 | ||
883 | pud = stage2_get_pud(kvm, cache, addr); | |
d6dbdd3c MZ |
884 | if (!pud) |
885 | return NULL; | |
886 | ||
70fd1906 | 887 | if (stage2_pud_none(*pud)) { |
d5d8184d | 888 | if (!cache) |
ad361f09 | 889 | return NULL; |
d5d8184d | 890 | pmd = mmu_memory_cache_alloc(cache); |
70fd1906 | 891 | stage2_pud_populate(pud, pmd); |
d5d8184d | 892 | get_page(virt_to_page(pud)); |
c62ee2b2 MZ |
893 | } |
894 | ||
70fd1906 | 895 | return stage2_pmd_offset(pud, addr); |
ad361f09 CD |
896 | } |
897 | ||
898 | static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache | |
899 | *cache, phys_addr_t addr, const pmd_t *new_pmd) | |
900 | { | |
901 | pmd_t *pmd, old_pmd; | |
902 | ||
903 | pmd = stage2_get_pmd(kvm, cache, addr); | |
904 | VM_BUG_ON(!pmd); | |
d5d8184d | 905 | |
ad361f09 CD |
906 | /* |
907 | * Mapping in huge pages should only happen through a fault. If a | |
908 | * page is merged into a transparent huge page, the individual | |
909 | * subpages of that huge page should be unmapped through MMU | |
910 | * notifiers before we get here. | |
911 | * | |
912 | * Merging of CompoundPages is not supported; they should instead be | |
913 | * split first, unmapped, merged, and mapped back in on-demand. | |
914 | */ | |
915 | VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd)); | |
916 | ||
917 | old_pmd = *pmd; | |
d4b9e079 MZ |
918 | if (pmd_present(old_pmd)) { |
919 | pmd_clear(pmd); | |
ad361f09 | 920 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
d4b9e079 | 921 | } else { |
ad361f09 | 922 | get_page(virt_to_page(pmd)); |
d4b9e079 MZ |
923 | } |
924 | ||
925 | kvm_set_pmd(pmd, *new_pmd); | |
ad361f09 CD |
926 | return 0; |
927 | } | |
928 | ||
929 | static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |
15a49a44 MS |
930 | phys_addr_t addr, const pte_t *new_pte, |
931 | unsigned long flags) | |
ad361f09 CD |
932 | { |
933 | pmd_t *pmd; | |
934 | pte_t *pte, old_pte; | |
15a49a44 MS |
935 | bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP; |
936 | bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE; | |
937 | ||
938 | VM_BUG_ON(logging_active && !cache); | |
ad361f09 | 939 | |
38f791a4 | 940 | /* Create stage-2 page table mapping - Levels 0 and 1 */ |
ad361f09 CD |
941 | pmd = stage2_get_pmd(kvm, cache, addr); |
942 | if (!pmd) { | |
943 | /* | |
944 | * Ignore calls from kvm_set_spte_hva for unallocated | |
945 | * address ranges. | |
946 | */ | |
947 | return 0; | |
948 | } | |
949 | ||
15a49a44 MS |
950 | /* |
951 | * While dirty page logging - dissolve huge PMD, then continue on to | |
952 | * allocate page. | |
953 | */ | |
954 | if (logging_active) | |
955 | stage2_dissolve_pmd(kvm, addr, pmd); | |
956 | ||
ad361f09 | 957 | /* Create stage-2 page mappings - Level 2 */ |
d5d8184d CD |
958 | if (pmd_none(*pmd)) { |
959 | if (!cache) | |
960 | return 0; /* ignore calls from kvm_set_spte_hva */ | |
961 | pte = mmu_memory_cache_alloc(cache); | |
d5d8184d | 962 | pmd_populate_kernel(NULL, pmd, pte); |
d5d8184d | 963 | get_page(virt_to_page(pmd)); |
c62ee2b2 MZ |
964 | } |
965 | ||
966 | pte = pte_offset_kernel(pmd, addr); | |
d5d8184d CD |
967 | |
968 | if (iomap && pte_present(*pte)) | |
969 | return -EFAULT; | |
970 | ||
971 | /* Create 2nd stage page table mapping - Level 3 */ | |
972 | old_pte = *pte; | |
d4b9e079 MZ |
973 | if (pte_present(old_pte)) { |
974 | kvm_set_pte(pte, __pte(0)); | |
48762767 | 975 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
d4b9e079 | 976 | } else { |
d5d8184d | 977 | get_page(virt_to_page(pte)); |
d4b9e079 | 978 | } |
d5d8184d | 979 | |
d4b9e079 | 980 | kvm_set_pte(pte, *new_pte); |
d5d8184d CD |
981 | return 0; |
982 | } | |
d5d8184d | 983 | |
06485053 CM |
984 | #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
985 | static int stage2_ptep_test_and_clear_young(pte_t *pte) | |
986 | { | |
987 | if (pte_young(*pte)) { | |
988 | *pte = pte_mkold(*pte); | |
989 | return 1; | |
990 | } | |
d5d8184d CD |
991 | return 0; |
992 | } | |
06485053 CM |
993 | #else |
994 | static int stage2_ptep_test_and_clear_young(pte_t *pte) | |
995 | { | |
996 | return __ptep_test_and_clear_young(pte); | |
997 | } | |
998 | #endif | |
999 | ||
1000 | static int stage2_pmdp_test_and_clear_young(pmd_t *pmd) | |
1001 | { | |
1002 | return stage2_ptep_test_and_clear_young((pte_t *)pmd); | |
1003 | } | |
d5d8184d CD |
1004 | |
1005 | /** | |
1006 | * kvm_phys_addr_ioremap - map a device range to guest IPA | |
1007 | * | |
1008 | * @kvm: The KVM pointer | |
1009 | * @guest_ipa: The IPA at which to insert the mapping | |
1010 | * @pa: The physical address of the device | |
1011 | * @size: The size of the mapping | |
1012 | */ | |
1013 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | |
c40f2f8f | 1014 | phys_addr_t pa, unsigned long size, bool writable) |
d5d8184d CD |
1015 | { |
1016 | phys_addr_t addr, end; | |
1017 | int ret = 0; | |
1018 | unsigned long pfn; | |
1019 | struct kvm_mmu_memory_cache cache = { 0, }; | |
1020 | ||
1021 | end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; | |
1022 | pfn = __phys_to_pfn(pa); | |
1023 | ||
1024 | for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { | |
c62ee2b2 | 1025 | pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE); |
d5d8184d | 1026 | |
c40f2f8f | 1027 | if (writable) |
06485053 | 1028 | pte = kvm_s2pte_mkwrite(pte); |
c40f2f8f | 1029 | |
38f791a4 CD |
1030 | ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES, |
1031 | KVM_NR_MEM_OBJS); | |
d5d8184d CD |
1032 | if (ret) |
1033 | goto out; | |
1034 | spin_lock(&kvm->mmu_lock); | |
15a49a44 MS |
1035 | ret = stage2_set_pte(kvm, &cache, addr, &pte, |
1036 | KVM_S2PTE_FLAG_IS_IOMAP); | |
d5d8184d CD |
1037 | spin_unlock(&kvm->mmu_lock); |
1038 | if (ret) | |
1039 | goto out; | |
1040 | ||
1041 | pfn++; | |
1042 | } | |
1043 | ||
1044 | out: | |
1045 | mmu_free_memory_cache(&cache); | |
1046 | return ret; | |
1047 | } | |
1048 | ||
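A typical caller is device emulation setup code that needs a pass-through MMIO window in the guest's IPA space. A hedged sketch of the calling convention (the wrapper, addresses and size are placeholders, not taken from this file):

```c
/* Sketch only: map a 64K writable device window into the guest at 'gpa'. */
static int example_map_device_window(struct kvm *kvm, phys_addr_t gpa,
				     phys_addr_t pa)
{
	return kvm_phys_addr_ioremap(kvm, gpa, pa, 0x10000 /* 64K */, true);
}
```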
ba049e93 | 1049 | static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap) |
9b5fdb97 | 1050 | { |
ba049e93 | 1051 | kvm_pfn_t pfn = *pfnp; |
9b5fdb97 CD |
1052 | gfn_t gfn = *ipap >> PAGE_SHIFT; |
1053 | ||
127393fb | 1054 | if (PageTransCompoundMap(pfn_to_page(pfn))) { |
9b5fdb97 CD |
1055 | unsigned long mask; |
1056 | /* | |
1057 | * The address we faulted on is backed by a transparent huge | |
1058 | * page. However, because we map the compound huge page and | |
1059 | * not the individual tail page, we need to transfer the | |
1060 | * refcount to the head page. We have to be careful that the | |
1061 | * THP doesn't start to split while we are adjusting the | |
1062 | * refcounts. | |
1063 | * | |
1064 | * We are sure this doesn't happen, because mmu_notifier_retry | |
1065 | * was successful and we are holding the mmu_lock, so if this | |
1066 | * THP is trying to split, it will be blocked in the mmu | |
1067 | * notifier before touching any of the pages, specifically | |
1068 | * before being able to call __split_huge_page_refcount(). | |
1069 | * | |
1070 | * We can therefore safely transfer the refcount from PG_tail | |
1071 | * to PG_head and switch the pfn from a tail page to the head | |
1072 | * page accordingly. | |
1073 | */ | |
1074 | mask = PTRS_PER_PMD - 1; | |
1075 | VM_BUG_ON((gfn & mask) != (pfn & mask)); | |
1076 | if (pfn & mask) { | |
1077 | *ipap &= PMD_MASK; | |
1078 | kvm_release_pfn_clean(pfn); | |
1079 | pfn &= ~mask; | |
1080 | kvm_get_pfn(pfn); | |
1081 | *pfnp = pfn; | |
1082 | } | |
1083 | ||
1084 | return true; | |
1085 | } | |
1086 | ||
1087 | return false; | |
1088 | } | |
1089 | ||
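A worked example of the adjustment above, assuming 4K pages so that mask = PTRS_PER_PMD - 1 = 0x1ff (all numbers hypothetical):

```c
/*
 * fault IPA = 0x80045000  ->  gfn = 0x80045,  gfn & 0x1ff = 0x045
 * pfn       = 0x12245                         pfn & 0x1ff = 0x045  (must match)
 * pfn & mask is non-zero, so the fault hit a tail page of the THP:
 *   *ipap &= PMD_MASK   ->  IPA rounded down to 0x80000000
 *   pfn   &= ~mask      ->  head page pfn 0x12200
 * The resulting block mapping covers IPA 0x80000000..0x801fffff with
 * host PA 0x12200000..0x123fffff through a single PMD.
 */
```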
a7d079ce AB |
1090 | static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) |
1091 | { | |
1092 | if (kvm_vcpu_trap_is_iabt(vcpu)) | |
1093 | return false; | |
1094 | ||
1095 | return kvm_vcpu_dabt_iswrite(vcpu); | |
1096 | } | |
1097 | ||
c6473555 MS |
1098 | /** |
1099 | * stage2_wp_ptes - write protect PMD range | |
1100 | * @pmd: pointer to pmd entry | |
1101 | * @addr: range start address | |
1102 | * @end: range end address | |
1103 | */ | |
1104 | static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) | |
1105 | { | |
1106 | pte_t *pte; | |
1107 | ||
1108 | pte = pte_offset_kernel(pmd, addr); | |
1109 | do { | |
1110 | if (!pte_none(*pte)) { | |
1111 | if (!kvm_s2pte_readonly(pte)) | |
1112 | kvm_set_s2pte_readonly(pte); | |
1113 | } | |
1114 | } while (pte++, addr += PAGE_SIZE, addr != end); | |
1115 | } | |
1116 | ||
1117 | /** | |
1118 | * stage2_wp_pmds - write protect PUD range | |
1119 | * @pud: pointer to pud entry | |
1120 | * @addr: range start address | |
1121 | * @end: range end address | |
1122 | */ | |
1123 | static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end) | |
1124 | { | |
1125 | pmd_t *pmd; | |
1126 | phys_addr_t next; | |
1127 | ||
70fd1906 | 1128 | pmd = stage2_pmd_offset(pud, addr); |
c6473555 MS |
1129 | |
1130 | do { | |
70fd1906 | 1131 | next = stage2_pmd_addr_end(addr, end); |
c6473555 | 1132 | if (!pmd_none(*pmd)) { |
bbb3b6b3 | 1133 | if (pmd_thp_or_huge(*pmd)) { |
c6473555 MS |
1134 | if (!kvm_s2pmd_readonly(pmd)) |
1135 | kvm_set_s2pmd_readonly(pmd); | |
1136 | } else { | |
1137 | stage2_wp_ptes(pmd, addr, next); | |
1138 | } | |
1139 | } | |
1140 | } while (pmd++, addr = next, addr != end); | |
1141 | } | |
1142 | ||
1143 | /** | |
1144 | * stage2_wp_puds - write protect PGD range | |
1145 | * @pgd: pointer to pgd entry | |
1146 | * @addr: range start address | |
1147 | * @end: range end address | |
1148 | * | |
1149 | * Process PUD entries, for a huge PUD we cause a panic. | |
1150 | */ | |
1151 | static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end) | |
1152 | { | |
1153 | pud_t *pud; | |
1154 | phys_addr_t next; | |
1155 | ||
70fd1906 | 1156 | pud = stage2_pud_offset(pgd, addr); |
c6473555 | 1157 | do { |
70fd1906 SP |
1158 | next = stage2_pud_addr_end(addr, end); |
1159 | if (!stage2_pud_none(*pud)) { | |
c6473555 | 1160 | /* TODO:PUD not supported, revisit later if supported */ |
70fd1906 | 1161 | BUG_ON(stage2_pud_huge(*pud)); |
c6473555 MS |
1162 | stage2_wp_pmds(pud, addr, next); |
1163 | } | |
1164 | } while (pud++, addr = next, addr != end); | |
1165 | } | |
1166 | ||
1167 | /** | |
1168 | * stage2_wp_range() - write protect stage2 memory region range | |
1169 | * @kvm: The KVM pointer | |
1170 | * @addr: Start address of range | |
1171 | * @end: End address of range | |
1172 | */ | |
1173 | static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) | |
1174 | { | |
1175 | pgd_t *pgd; | |
1176 | phys_addr_t next; | |
1177 | ||
70fd1906 | 1178 | pgd = kvm->arch.pgd + stage2_pgd_index(addr); |
c6473555 MS |
1179 | do { |
1180 | /* | |
1181 | * Release kvm_mmu_lock periodically if the memory region is | |
1182 | * large. Otherwise, we may see kernel panics with | |
227ea818 CD |
1183 | * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, |
1184 | * CONFIG_LOCKDEP. Additionally, holding the lock too long | |
0c428a6a SP |
1185 | * will also starve other vCPUs. We have to also make sure |
1186 | * that the page tables are not freed while we released | |
1187 | * the lock. | |
c6473555 | 1188 | */ |
0c428a6a SP |
1189 | cond_resched_lock(&kvm->mmu_lock); |
1190 | if (!READ_ONCE(kvm->arch.pgd)) | |
1191 | break; | |
70fd1906 SP |
1192 | next = stage2_pgd_addr_end(addr, end); |
1193 | if (stage2_pgd_present(*pgd)) | |
c6473555 MS |
1194 | stage2_wp_puds(pgd, addr, next); |
1195 | } while (pgd++, addr = next, addr != end); | |
1196 | } | |
1197 | ||
1198 | /** | |
1199 | * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot | |
1200 | * @kvm: The KVM pointer | |
1201 | * @slot: The memory slot to write protect | |
1202 | * | |
1203 | * Called to start logging dirty pages when the KVM_MEM_LOG_DIRTY_PAGES flag is | |
1204 | * set on a memory region. After this function returns | |
1205 | * all present PMD and PTEs are write protected in the memory region. | |
1206 | * Afterwards read of dirty page log can be called. | |
1207 | * | |
1208 | * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired, | |
1209 | * serializing operations for VM memory regions. | |
1210 | */ | |
1211 | void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) | |
1212 | { | |
9f6b8029 PB |
1213 | struct kvm_memslots *slots = kvm_memslots(kvm); |
1214 | struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); | |
c6473555 MS |
1215 | phys_addr_t start = memslot->base_gfn << PAGE_SHIFT; |
1216 | phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; | |
1217 | ||
1218 | spin_lock(&kvm->mmu_lock); | |
1219 | stage2_wp_range(kvm, start, end); | |
1220 | spin_unlock(&kvm->mmu_lock); | |
1221 | kvm_flush_remote_tlbs(kvm); | |
1222 | } | |
53c810c3 MS |
1223 | |
1224 | /** | |
3b0f1d01 | 1225 | * kvm_mmu_write_protect_pt_masked() - write protect dirty pages |
53c810c3 MS |
1226 | * @kvm: The KVM pointer |
1227 | * @slot: The memory slot associated with mask | |
1228 | * @gfn_offset: The gfn offset in memory slot | |
1229 | * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory | |
1230 | * slot to be write protected | |
1231 | * | |
1232 | * Walks the bits set in mask and write protects the associated PTEs. Caller must | |
1233 | * acquire kvm_mmu_lock. | |
1234 | */ | |
3b0f1d01 | 1235 | static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, |
53c810c3 MS |
1236 | struct kvm_memory_slot *slot, |
1237 | gfn_t gfn_offset, unsigned long mask) | |
1238 | { | |
1239 | phys_addr_t base_gfn = slot->base_gfn + gfn_offset; | |
1240 | phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; | |
1241 | phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; | |
1242 | ||
1243 | stage2_wp_range(kvm, start, end); | |
1244 | } | |
c6473555 | 1245 | |
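For illustration, the start/end computation above works out as follows for hypothetical values (4K pages):

```c
/*
 * base_gfn = 0x1000, mask = 0xf0:
 *   __ffs(mask) = 4, __fls(mask) = 7
 *   start = (0x1000 + 4) << PAGE_SHIFT = 0x1004000
 *   end   = (0x1000 + 8) << PAGE_SHIFT = 0x1008000
 * i.e. the pages backing gfns 0x1004..0x1007 are write protected with a
 * single stage2_wp_range() call.
 */
```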
3b0f1d01 KH |
1246 | /* |
1247 | * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected | |
1248 | * dirty pages. | |
1249 | * | |
1250 | * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to | |
1251 | * enable dirty logging for them. | |
1252 | */ | |
1253 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, | |
1254 | struct kvm_memory_slot *slot, | |
1255 | gfn_t gfn_offset, unsigned long mask) | |
1256 | { | |
1257 | kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); | |
1258 | } | |
1259 | ||
ba049e93 | 1260 | static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn, |
13b7756c | 1261 | unsigned long size) |
0d3e4d4f | 1262 | { |
13b7756c | 1263 | __coherent_cache_guest_page(vcpu, pfn, size); |
0d3e4d4f MZ |
1264 | } |
1265 | ||
196f878a JM |
1266 | static void kvm_send_hwpoison_signal(unsigned long address, |
1267 | struct vm_area_struct *vma) | |
1268 | { | |
1269 | siginfo_t info; | |
1270 | ||
1271 | info.si_signo = SIGBUS; | |
1272 | info.si_errno = 0; | |
1273 | info.si_code = BUS_MCEERR_AR; | |
1274 | info.si_addr = (void __user *)address; | |
1275 | ||
1276 | if (is_vm_hugetlb_page(vma)) | |
1277 | info.si_addr_lsb = huge_page_shift(hstate_vma(vma)); | |
1278 | else | |
1279 | info.si_addr_lsb = PAGE_SHIFT; | |
1280 | ||
1281 | send_sig_info(SIGBUS, &info, current); | |
1282 | } | |
1283 | ||
94f8e641 | 1284 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
98047888 | 1285 | struct kvm_memory_slot *memslot, unsigned long hva, |
94f8e641 CD |
1286 | unsigned long fault_status) |
1287 | { | |
94f8e641 | 1288 | int ret; |
9b5fdb97 | 1289 | bool write_fault, writable, hugetlb = false, force_pte = false; |
94f8e641 | 1290 | unsigned long mmu_seq; |
ad361f09 | 1291 | gfn_t gfn = fault_ipa >> PAGE_SHIFT; |
ad361f09 | 1292 | struct kvm *kvm = vcpu->kvm; |
94f8e641 | 1293 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
ad361f09 | 1294 | struct vm_area_struct *vma; |
ba049e93 | 1295 | kvm_pfn_t pfn; |
b8865767 | 1296 | pgprot_t mem_type = PAGE_S2; |
15a49a44 MS |
1297 | bool logging_active = memslot_is_logging(memslot); |
1298 | unsigned long flags = 0; | |
94f8e641 | 1299 | |
a7d079ce | 1300 | write_fault = kvm_is_write_fault(vcpu); |
94f8e641 CD |
1301 | if (fault_status == FSC_PERM && !write_fault) { |
1302 | kvm_err("Unexpected L2 read permission error\n"); | |
1303 | return -EFAULT; | |
1304 | } | |
1305 | ||
ad361f09 CD |
1306 | /* Let's check if we will get back a huge page backed by hugetlbfs */ |
1307 | down_read(¤t->mm->mmap_sem); | |
1308 | vma = find_vma_intersection(current->mm, hva, hva + 1); | |
37b54408 AB |
1309 | if (unlikely(!vma)) { |
1310 | kvm_err("Failed to find VMA for hva 0x%lx\n", hva); | |
1311 | up_read(¤t->mm->mmap_sem); | |
1312 | return -EFAULT; | |
1313 | } | |
1314 | ||
15a49a44 | 1315 | if (is_vm_hugetlb_page(vma) && !logging_active) { |
ad361f09 CD |
1316 | hugetlb = true; |
1317 | gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; | |
9b5fdb97 CD |
1318 | } else { |
1319 | /* | |
136d737f MZ |
1320 | * Pages belonging to memslots that don't have the same |
1321 | * alignment for userspace and IPA cannot be mapped using | |
1322 | * block descriptors even if the pages belong to a THP for | |
1323 | * the process, because the stage-2 block descriptor will | |
1324 | * cover more than a single THP and we lose atomicity for | |
1325 | * unmapping, updates, and splits of the THP or other pages | |
1326 | * in the stage-2 block range. | |
9b5fdb97 | 1327 | */ |
136d737f MZ |
1328 | if ((memslot->userspace_addr & ~PMD_MASK) != |
1329 | ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK)) | |
9b5fdb97 | 1330 | force_pte = true; |
ad361f09 CD |
1331 | } |
1332 | up_read(¤t->mm->mmap_sem); | |
1333 | ||
94f8e641 | 1334 | /* We need minimum second+third level pages */ |
38f791a4 CD |
1335 | ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES, |
1336 | KVM_NR_MEM_OBJS); | |
94f8e641 CD |
1337 | if (ret) |
1338 | return ret; | |
1339 | ||
1340 | mmu_seq = vcpu->kvm->mmu_notifier_seq; | |
1341 | /* | |
1342 | * Ensure the read of mmu_notifier_seq happens before we call | |
1343 | * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk | |
1344 | * the page we just got a reference to gets unmapped before we have a | |
1345 | * chance to grab the mmu_lock, which ensure that if the page gets | |
1346 | * unmapped afterwards, the call to kvm_unmap_hva will take it away | |
1347 | * from us again properly. This smp_rmb() interacts with the smp_wmb() | |
1348 | * in kvm_mmu_notifier_invalidate_<page|range_end>. | |
1349 | */ | |
1350 | smp_rmb(); | |
1351 | ||
ad361f09 | 1352 | pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); |
196f878a JM |
1353 | if (pfn == KVM_PFN_ERR_HWPOISON) { |
1354 | kvm_send_hwpoison_signal(hva, vma); | |
1355 | return 0; | |
1356 | } | |
9ac71595 | 1357 | if (is_error_noslot_pfn(pfn)) |
94f8e641 CD |
1358 | return -EFAULT; |
1359 | ||
15a49a44 | 1360 | if (kvm_is_device_pfn(pfn)) { |
b8865767 | 1361 | mem_type = PAGE_S2_DEVICE; |
15a49a44 MS |
1362 | flags |= KVM_S2PTE_FLAG_IS_IOMAP; |
1363 | } else if (logging_active) { | |
1364 | /* | |
1365 | * Faults on pages in a memslot with logging enabled | |
1366 | * should not be mapped with huge pages (it introduces churn | |
1367 | * and performance degradation), so force a pte mapping. | |
1368 | */ | |
1369 | force_pte = true; | |
1370 | flags |= KVM_S2_FLAG_LOGGING_ACTIVE; | |
1371 | ||
1372 | /* | |
1373 | * Only actually map the page as writable if this was a write | |
1374 | * fault. | |
1375 | */ | |
1376 | if (!write_fault) | |
1377 | writable = false; | |
1378 | } | |
b8865767 | 1379 | |
ad361f09 CD |
1380 | spin_lock(&kvm->mmu_lock); |
1381 | if (mmu_notifier_retry(kvm, mmu_seq)) | |
94f8e641 | 1382 | goto out_unlock; |
15a49a44 | 1383 | |
9b5fdb97 CD |
1384 | if (!hugetlb && !force_pte) |
1385 | hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa); | |
ad361f09 CD |
1386 | |
1387 | if (hugetlb) { | |
b8865767 | 1388 | pmd_t new_pmd = pfn_pmd(pfn, mem_type); |
ad361f09 CD |
1389 | new_pmd = pmd_mkhuge(new_pmd); |
1390 | if (writable) { | |
06485053 | 1391 | new_pmd = kvm_s2pmd_mkwrite(new_pmd); |
ad361f09 CD |
1392 | kvm_set_pfn_dirty(pfn); |
1393 | } | |
13b7756c | 1394 | coherent_cache_guest_page(vcpu, pfn, PMD_SIZE); |
ad361f09 CD |
1395 | ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); |
1396 | } else { | |
b8865767 | 1397 | pte_t new_pte = pfn_pte(pfn, mem_type); |
15a49a44 | 1398 | |
ad361f09 | 1399 | if (writable) { |
06485053 | 1400 | new_pte = kvm_s2pte_mkwrite(new_pte); |
ad361f09 | 1401 | kvm_set_pfn_dirty(pfn); |
15a49a44 | 1402 | mark_page_dirty(kvm, gfn); |
ad361f09 | 1403 | } |
13b7756c | 1404 | coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE); |
15a49a44 | 1405 | ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags); |
94f8e641 | 1406 | } |
ad361f09 | 1407 | |
94f8e641 | 1408 | out_unlock: |
ad361f09 | 1409 | spin_unlock(&kvm->mmu_lock); |
35307b9a | 1410 | kvm_set_pfn_accessed(pfn); |
94f8e641 | 1411 | kvm_release_pfn_clean(pfn); |
ad361f09 | 1412 | return ret; |
94f8e641 CD |
1413 | } |
1414 | ||
aeda9130 MZ |
1415 | /* |
1416 | * Resolve the access fault by making the page young again. | |
1417 | * Note that because the faulting entry is guaranteed not to be | |
1418 | * cached in the TLB, we don't need to invalidate anything. | |
06485053 CM |
1419 | * Only the HW Access Flag updates are supported for Stage 2 (no DBM), |
1420 | * so there is no need for atomic (pte|pmd)_mkyoung operations. | |
aeda9130 MZ |
1421 | */ |
1422 | static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) | |
1423 | { | |
1424 | pmd_t *pmd; | |
1425 | pte_t *pte; | |
ba049e93 | 1426 | kvm_pfn_t pfn; |
aeda9130 MZ |
1427 | bool pfn_valid = false; |
1428 | ||
1429 | trace_kvm_access_fault(fault_ipa); | |
1430 | ||
1431 | spin_lock(&vcpu->kvm->mmu_lock); | |
1432 | ||
1433 | pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa); | |
1434 | if (!pmd || pmd_none(*pmd)) /* Nothing there */ | |
1435 | goto out; | |
1436 | ||
bbb3b6b3 | 1437 | if (pmd_thp_or_huge(*pmd)) { /* THP, HugeTLB */ |
aeda9130 MZ |
1438 | *pmd = pmd_mkyoung(*pmd); |
1439 | pfn = pmd_pfn(*pmd); | |
1440 | pfn_valid = true; | |
1441 | goto out; | |
1442 | } | |
1443 | ||
1444 | pte = pte_offset_kernel(pmd, fault_ipa); | |
1445 | if (pte_none(*pte)) /* Nothing there either */ | |
1446 | goto out; | |
1447 | ||
1448 | *pte = pte_mkyoung(*pte); /* Just a page... */ | |
1449 | pfn = pte_pfn(*pte); | |
1450 | pfn_valid = true; | |
1451 | out: | |
1452 | spin_unlock(&vcpu->kvm->mmu_lock); | |
1453 | if (pfn_valid) | |
1454 | kvm_set_pfn_accessed(pfn); | |
1455 | } | |
1456 | ||
621f48e4 TB |
1457 | static bool is_abort_sea(unsigned long fault_status) |
1458 | { | |
1459 | switch (fault_status) { | |
1460 | case FSC_SEA: | |
1461 | case FSC_SEA_TTW0: | |
1462 | case FSC_SEA_TTW1: | |
1463 | case FSC_SEA_TTW2: | |
1464 | case FSC_SEA_TTW3: | |
1465 | case FSC_SECC: | |
1466 | case FSC_SECC_TTW0: | |
1467 | case FSC_SECC_TTW1: | |
1468 | case FSC_SECC_TTW2: | |
1469 | case FSC_SECC_TTW3: | |
1470 | return true; | |
1471 | default: | |
1472 | return false; | |
1473 | } | |
1474 | } | |
1475 | ||
94f8e641 CD |
1476 | /** |
1477 | * kvm_handle_guest_abort - handles all 2nd stage aborts | |
1478 | * @vcpu: the VCPU pointer | |
1479 | * @run: the kvm_run structure | |
1480 | * | |
1481 | * Any abort that gets to the host is almost guaranteed to be caused by a | |
1482 | * missing second stage translation table entry, which means either that the | |
1483 | * guest simply needs more memory (and we must allocate an appropriate page) or | |
1484 | * that the guest tried to access I/O memory, which is emulated by user | |
1485 | * space. The distinction is based on the IPA causing the fault and whether this | |
1486 | * memory region has been registered as standard RAM by user space. | |
1487 | */ | |
342cd0ab CD |
1488 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) |
1489 | { | |
94f8e641 CD |
1490 | unsigned long fault_status; |
1491 | phys_addr_t fault_ipa; | |
1492 | struct kvm_memory_slot *memslot; | |
98047888 CD |
1493 | unsigned long hva; |
1494 | bool is_iabt, write_fault, writable; | |
94f8e641 CD |
1495 | gfn_t gfn; |
1496 | int ret, idx; | |
1497 | ||
621f48e4 TB |
1498 | fault_status = kvm_vcpu_trap_get_fault_type(vcpu); |
1499 | ||
1500 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); | |
1501 | ||
1502 | /* | |
1503 | * The host kernel will handle the synchronous external abort. There | |
1504 | * is no need to pass the error into the guest. | |
1505 | */ | |
1506 | if (is_abort_sea(fault_status)) { | |
1507 | if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu))) | |
1508 | return 1; | |
1509 | } | |
1510 | ||
52d1dba9 | 1511 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
4055710b MZ |
1512 | if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) { |
1513 | kvm_inject_vabt(vcpu); | |
1514 | return 1; | |
1515 | } | |
1516 | ||
7393b599 MZ |
1517 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), |
1518 | kvm_vcpu_get_hfar(vcpu), fault_ipa); | |
94f8e641 CD |
1519 | |
1520 | /* Check that the stage-2 fault is a translation, permission or access fault */ | |
35307b9a MZ |
1521 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM && |
1522 | fault_status != FSC_ACCESS) { | |
0496daa5 CD |
1523 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", |
1524 | kvm_vcpu_trap_get_class(vcpu), | |
1525 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), | |
1526 | (unsigned long)kvm_vcpu_get_hsr(vcpu)); | |
94f8e641 CD |
1527 | return -EFAULT; |
1528 | } | |
1529 | ||
1530 | idx = srcu_read_lock(&vcpu->kvm->srcu); | |
1531 | ||
1532 | gfn = fault_ipa >> PAGE_SHIFT; | |
98047888 CD |
1533 | memslot = gfn_to_memslot(vcpu->kvm, gfn); |
1534 | hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); | |
a7d079ce | 1535 | write_fault = kvm_is_write_fault(vcpu); |
98047888 | 1536 | if (kvm_is_error_hva(hva) || (write_fault && !writable)) { |
94f8e641 CD |
1537 | if (is_iabt) { |
1538 | /* Prefetch Abort on I/O address */ | |
7393b599 | 1539 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
94f8e641 CD |
1540 | ret = 1; |
1541 | goto out_unlock; | |
1542 | } | |
1543 | ||
57c841f1 MZ |
1544 | /* |
1545 | * Check for a cache maintenance operation. Since we | |
1546 | * ended up here, we know it is outside of any memory | |
1547 | * slot. But we can't find out if that is for a device, | |
1548 | * or if the guest is just being stupid. The only thing | |
1549 | * we know for sure is that this range cannot be cached. | |
1550 | * | |
1551 | * So let's assume that the guest is just being | |
1552 | * cautious, and skip the instruction. | |
1553 | */ | |
1554 | if (kvm_vcpu_dabt_is_cm(vcpu)) { | |
1555 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | |
1556 | ret = 1; | |
1557 | goto out_unlock; | |
1558 | } | |
1559 | ||
cfe3950c MZ |
1560 | /* |
1561 | * The IPA is reported as [MAX:12], so we need to | |
1562 | * complement it with the bottom 12 bits from the | |
1563 | * faulting VA. This is always 12 bits, irrespective | |
1564 | * of the page size. | |
1565 | */ | |
1566 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); | |
45e96ea6 | 1567 | ret = io_mem_abort(vcpu, run, fault_ipa); |
94f8e641 CD |
1568 | goto out_unlock; |
1569 | } | |
1570 | ||
c3058d5d CD |
1571 | /* Userspace should not be able to register out-of-bounds IPAs */ |
1572 | VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE); | |
1573 | ||
aeda9130 MZ |
1574 | if (fault_status == FSC_ACCESS) { |
1575 | handle_access_fault(vcpu, fault_ipa); | |
1576 | ret = 1; | |
1577 | goto out_unlock; | |
1578 | } | |
1579 | ||
98047888 | 1580 | ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); |
94f8e641 CD |
1581 | if (ret == 0) |
1582 | ret = 1; | |
1583 | out_unlock: | |
1584 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | |
1585 | return ret; | |
342cd0ab CD |
1586 | } |
1587 | ||
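/*
 * When io_mem_abort() cannot handle the access in the kernel (no
 * in-kernel device claims the address), it fills in the kvm_run
 * structure and returns to userspace with KVM_EXIT_MMIO. Below is a
 * minimal, illustrative userspace sketch of that side; it is not part
 * of this file, and device_read()/device_write() are hypothetical
 * stand-ins for the VMM's device emulation.
 */
#include <stdint.h>
#include <string.h>
#include <linux/kvm.h>

/* Hypothetical device-emulation hooks; a real VMM dispatches on address. */
static void device_read(uint64_t addr, void *data, uint32_t len)
{
	(void)addr;
	memset(data, 0, len);			/* read-as-zero placeholder */
}

static void device_write(uint64_t addr, const void *data, uint32_t len)
{
	(void)addr; (void)data; (void)len;	/* write-ignored placeholder */
}

static void handle_mmio_exit(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_MMIO)
		return;

	if (run->mmio.is_write)
		device_write(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
	else
		/* for reads, the data filled in here is injected into the
		 * guest when KVM_RUN is called again */
		device_read(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
}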
1d2ebacc MZ |
1588 | static int handle_hva_to_gpa(struct kvm *kvm, |
1589 | unsigned long start, | |
1590 | unsigned long end, | |
1591 | int (*handler)(struct kvm *kvm, | |
056aad67 SP |
1592 | gpa_t gpa, u64 size, |
1593 | void *data), | |
1d2ebacc | 1594 | void *data) |
d5d8184d CD |
1595 | { |
1596 | struct kvm_memslots *slots; | |
1597 | struct kvm_memory_slot *memslot; | |
1d2ebacc | 1598 | int ret = 0; |
d5d8184d CD |
1599 | |
1600 | slots = kvm_memslots(kvm); | |
1601 | ||
1602 | /* we only care about the pages that the guest sees */ | |
1603 | kvm_for_each_memslot(memslot, slots) { | |
1604 | unsigned long hva_start, hva_end; | |
056aad67 | 1605 | gpa_t gpa; | |
d5d8184d CD |
1606 | |
1607 | hva_start = max(start, memslot->userspace_addr); | |
1608 | hva_end = min(end, memslot->userspace_addr + | |
1609 | (memslot->npages << PAGE_SHIFT)); | |
1610 | if (hva_start >= hva_end) | |
1611 | continue; | |
1612 | ||
056aad67 SP |
1613 | gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT; |
1614 | ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data); | |
d5d8184d | 1615 | } |
1d2ebacc MZ |
1616 | |
1617 | return ret; | |
d5d8184d CD |
1618 | } |
1619 | ||
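/*
 * handle_hva_to_gpa() clips the [start, end) HVA range against every
 * memslot and invokes the handler on the corresponding guest physical
 * range; the handlers' return values are OR'ed together. All of the
 * MMU notifier callbacks below are implemented on top of it.
 */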
056aad67 | 1620 | static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) |
d5d8184d | 1621 | { |
056aad67 | 1622 | unmap_stage2_range(kvm, gpa, size); |
1d2ebacc | 1623 | return 0; |
d5d8184d CD |
1624 | } |
1625 | ||
1626 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | |
1627 | { | |
1628 | unsigned long end = hva + PAGE_SIZE; | |
1629 | ||
1630 | if (!kvm->arch.pgd) | |
1631 | return 0; | |
1632 | ||
1633 | trace_kvm_unmap_hva(hva); | |
1634 | handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); | |
1635 | return 0; | |
1636 | } | |
1637 | ||
1638 | int kvm_unmap_hva_range(struct kvm *kvm, | |
1639 | unsigned long start, unsigned long end) | |
1640 | { | |
1641 | if (!kvm->arch.pgd) | |
1642 | return 0; | |
1643 | ||
1644 | trace_kvm_unmap_hva_range(start, end); | |
1645 | handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); | |
1646 | return 0; | |
1647 | } | |
1648 | ||
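/*
 * kvm_unmap_hva() and kvm_unmap_hva_range() are driven by the
 * invalidate MMU notifiers: whenever the host unmaps, migrates or
 * swaps out the backing pages, the corresponding stage-2 mappings must
 * be torn down before the host reuses the memory.
 */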
056aad67 | 1649 | static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) |
d5d8184d CD |
1650 | { |
1651 | pte_t *pte = (pte_t *)data; | |
1652 | ||
056aad67 | 1653 | WARN_ON(size != PAGE_SIZE); |
15a49a44 MS |
1654 | /* |
1655 | * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE | |
1656 | * flag clear because MMU notifiers will have unmapped a huge PMD before | |
1657 | * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and | |
1658 | * therefore stage2_set_pte() never needs to clear out a huge PMD | |
1659 | * through this calling path. | |
1660 | */ | |
1661 | stage2_set_pte(kvm, NULL, gpa, pte, 0); | |
1d2ebacc | 1662 | return 0; |
d5d8184d CD |
1663 | } |
1664 | ||
1665 | ||
1666 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) | |
1667 | { | |
1668 | unsigned long end = hva + PAGE_SIZE; | |
1669 | pte_t stage2_pte; | |
1670 | ||
1671 | if (!kvm->arch.pgd) | |
1672 | return; | |
1673 | ||
1674 | trace_kvm_set_spte_hva(hva); | |
1675 | stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2); | |
1676 | handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); | |
1677 | } | |
1678 | ||
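/*
 * kvm_set_spte_hva() backs the ->change_pte() MMU notifier, typically
 * triggered when KSM replaces a guest page with a merged copy. The new
 * mapping is installed with PAGE_S2, i.e. read-only at stage 2, so a
 * later guest write faults and breaks the sharing through the normal
 * user_mem_abort() path.
 */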
056aad67 | 1679 | static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) |
35307b9a MZ |
1680 | { |
1681 | pmd_t *pmd; | |
1682 | pte_t *pte; | |
1683 | ||
056aad67 | 1684 | WARN_ON(size != PAGE_SIZE && size != PMD_SIZE); |
35307b9a MZ |
1685 | pmd = stage2_get_pmd(kvm, NULL, gpa); |
1686 | if (!pmd || pmd_none(*pmd)) /* Nothing there */ | |
1687 | return 0; | |
1688 | ||
06485053 CM |
1689 | if (pmd_thp_or_huge(*pmd)) /* THP, HugeTLB */ |
1690 | return stage2_pmdp_test_and_clear_young(pmd); | |
35307b9a MZ |
1691 | |
1692 | pte = pte_offset_kernel(pmd, gpa); | |
1693 | if (pte_none(*pte)) | |
1694 | return 0; | |
1695 | ||
06485053 | 1696 | return stage2_ptep_test_and_clear_young(pte); |
35307b9a MZ |
1697 | } |
1698 | ||
056aad67 | 1699 | static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) |
35307b9a MZ |
1700 | { |
1701 | pmd_t *pmd; | |
1702 | pte_t *pte; | |
1703 | ||
056aad67 | 1704 | WARN_ON(size != PAGE_SIZE && size != PMD_SIZE); |
35307b9a MZ |
1705 | pmd = stage2_get_pmd(kvm, NULL, gpa); |
1706 | if (!pmd || pmd_none(*pmd)) /* Nothing there */ | |
1707 | return 0; | |
1708 | ||
bbb3b6b3 | 1709 | if (pmd_thp_or_huge(*pmd)) /* THP, HugeTLB */ |
35307b9a MZ |
1710 | return pmd_young(*pmd); |
1711 | ||
1712 | pte = pte_offset_kernel(pmd, gpa); | |
1713 | if (!pte_none(*pte)) /* Just a page... */ | |
1714 | return pte_young(*pte); | |
1715 | ||
1716 | return 0; | |
1717 | } | |
1718 | ||
1719 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) | |
1720 | { | |
7e5a6722 SP |
1721 | if (!kvm->arch.pgd) |
1722 | return 0; | |
35307b9a MZ |
1723 | trace_kvm_age_hva(start, end); |
1724 | return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); | |
1725 | } | |
1726 | ||
1727 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | |
1728 | { | |
7e5a6722 SP |
1729 | if (!kvm->arch.pgd) |
1730 | return 0; | |
35307b9a MZ |
1731 | trace_kvm_test_age_hva(hva); |
1732 | return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE, kvm_test_age_hva_handler, NULL); | |
1733 | } | |
1734 | ||
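/*
 * kvm_age_hva()/kvm_test_age_hva() back the clear_young/test_young MMU
 * notifiers used by the host's page reclaim: clearing the stage-2
 * Access Flag lets the LRU scan detect whether the guest has touched a
 * page since the last pass, without unmapping it. A cleared flag makes
 * the next guest access trap with FSC_ACCESS, which is resolved by
 * handle_access_fault() above.
 */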
d5d8184d CD |
1735 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) |
1736 | { | |
1737 | mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); | |
1738 | } | |
1739 | ||
342cd0ab CD |
1740 | phys_addr_t kvm_mmu_get_httbr(void) |
1741 | { | |
e4c5a685 AB |
1742 | if (__kvm_cpu_uses_extended_idmap()) |
1743 | return virt_to_phys(merged_hyp_pgd); | |
1744 | else | |
1745 | return virt_to_phys(hyp_pgd); | |
342cd0ab CD |
1746 | } |
1747 | ||
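/*
 * kvm_mmu_get_httbr() returns the physical address programmed into
 * HTTBR (TTBR0_EL2 on arm64). When the idmap cannot live inside the
 * regular HYP page tables (__kvm_cpu_uses_extended_idmap()), the
 * merged PGD built by kvm_mmu_init() below is used instead.
 */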
5a677ce0 MZ |
1748 | phys_addr_t kvm_get_idmap_vector(void) |
1749 | { | |
1750 | return hyp_idmap_vector; | |
1751 | } | |
1752 | ||
0535a3e2 MZ |
1753 | static int kvm_map_idmap_text(pgd_t *pgd) |
1754 | { | |
1755 | int err; | |
1756 | ||
1757 | /* Create the idmap in the boot page tables */ | |
1758 | err = __create_hyp_mappings(pgd, | |
1759 | hyp_idmap_start, hyp_idmap_end, | |
1760 | __phys_to_pfn(hyp_idmap_start), | |
1761 | PAGE_HYP_EXEC); | |
1762 | if (err) | |
1763 | kvm_err("Failed to idmap %lx-%lx\n", | |
1764 | hyp_idmap_start, hyp_idmap_end); | |
1765 | ||
1766 | return err; | |
1767 | } | |
1768 | ||
342cd0ab CD |
1769 | int kvm_mmu_init(void) |
1770 | { | |
2fb41059 MZ |
1771 | int err; |
1772 | ||
4fda342c SS |
1773 | hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start); |
1774 | hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end); | |
1775 | hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init); | |
5a677ce0 | 1776 | |
06f75a1f AB |
1777 | /* |
1778 | * We rely on the linker script to ensure at build time that the HYP | |
1779 | * init code does not cross a page boundary. | |
1780 | */ | |
1781 | BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); | |
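	/*
	 * Worked example (4K pages): with hyp_idmap_start = 0x40123000 and
	 * hyp_idmap_end = 0x40123800, (start ^ (end - 1)) == 0x7ff, which
	 * PAGE_MASK clears, so the BUG_ON() does not fire. A section that
	 * crosses a page boundary leaves bits above bit 11 set and does.
	 */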
5a677ce0 | 1782 | |
eac378a9 MZ |
1783 | kvm_info("IDMAP page: %lx\n", hyp_idmap_start); |
1784 | kvm_info("HYP VA range: %lx:%lx\n", | |
6c41a413 | 1785 | kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL)); |
eac378a9 | 1786 | |
6c41a413 | 1787 | if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) && |
d2896d4b MZ |
1788 | hyp_idmap_start < kern_hyp_va(~0UL) && |
1789 | hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) { | |
eac378a9 MZ |
1790 | /* |
1791 | * The idmap page intersects with the HYP VA space; | |
1792 | * it is not safe to continue further. | |
1793 | */ | |
1794 | kvm_err("IDMAP intersecting with HYP VA, unable to continue\n"); | |
1795 | err = -EINVAL; | |
1796 | goto out; | |
1797 | } | |
1798 | ||
38f791a4 | 1799 | hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); |
0535a3e2 | 1800 | if (!hyp_pgd) { |
d5d8184d | 1801 | kvm_err("Hyp mode PGD not allocated\n"); |
2fb41059 MZ |
1802 | err = -ENOMEM; |
1803 | goto out; | |
1804 | } | |
1805 | ||
0535a3e2 MZ |
1806 | if (__kvm_cpu_uses_extended_idmap()) { |
1807 | boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | |
1808 | hyp_pgd_order); | |
1809 | if (!boot_hyp_pgd) { | |
1810 | kvm_err("Hyp boot PGD not allocated\n"); | |
1811 | err = -ENOMEM; | |
1812 | goto out; | |
1813 | } | |
2fb41059 | 1814 | |
0535a3e2 MZ |
1815 | err = kvm_map_idmap_text(boot_hyp_pgd); |
1816 | if (err) | |
1817 | goto out; | |
d5d8184d | 1818 | |
e4c5a685 AB |
1819 | merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); |
1820 | if (!merged_hyp_pgd) { | |
1821 | kvm_err("Failed to allocate extra HYP pgd\n"); | |
1822 | err = -ENOMEM; | |
1823 | goto out; | |
1823 | } | |
1824 | __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd, | |
1825 | hyp_idmap_start); | |
0535a3e2 MZ |
1826 | } else { |
1827 | err = kvm_map_idmap_text(hyp_pgd); | |
1828 | if (err) | |
1829 | goto out; | |
5a677ce0 MZ |
1830 | } |
1831 | ||
d5d8184d | 1832 | return 0; |
2fb41059 | 1833 | out: |
4f728276 | 1834 | free_hyp_pgds(); |
2fb41059 | 1835 | return err; |
342cd0ab | 1836 | } |
df6ce24f EA |
1837 | |
1838 | void kvm_arch_commit_memory_region(struct kvm *kvm, | |
09170a49 | 1839 | const struct kvm_userspace_memory_region *mem, |
df6ce24f | 1840 | const struct kvm_memory_slot *old, |
f36f3f28 | 1841 | const struct kvm_memory_slot *new, |
df6ce24f EA |
1842 | enum kvm_mr_change change) |
1843 | { | |
c6473555 MS |
1844 | /* |
1845 | * At this point the memslot has been committed and there is an | |
1846 | * allocated dirty_bitmap[]; dirty pages will be tracked while the | |
1847 | * memory slot is write protected. | |
1848 | */ | |
1849 | if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) | |
1850 | kvm_mmu_wp_memory_region(kvm, mem->slot); | |
df6ce24f EA |
1851 | } |
1852 | ||
1853 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | |
1854 | struct kvm_memory_slot *memslot, | |
09170a49 | 1855 | const struct kvm_userspace_memory_region *mem, |
df6ce24f EA |
1856 | enum kvm_mr_change change) |
1857 | { | |
8eef9123 AB |
1858 | hva_t hva = mem->userspace_addr; |
1859 | hva_t reg_end = hva + mem->memory_size; | |
1860 | bool writable = !(mem->flags & KVM_MEM_READONLY); | |
1861 | int ret = 0; | |
1862 | ||
15a49a44 MS |
1863 | if (change != KVM_MR_CREATE && change != KVM_MR_MOVE && |
1864 | change != KVM_MR_FLAGS_ONLY) | |
8eef9123 AB |
1865 | return 0; |
1866 | ||
c3058d5d CD |
1867 | /* |
1868 | * Prevent userspace from creating a memory region outside of the IPA | |
1869 | * space addressable by the KVM guest. | |
1870 | */ | |
1871 | if (memslot->base_gfn + memslot->npages > | |
1872 | (KVM_PHYS_SIZE >> PAGE_SHIFT)) | |
1873 | return -EFAULT; | |
1874 | ||
72f31048 | 1875 | down_read(¤t->mm->mmap_sem); |
8eef9123 AB |
1876 | /* |
1877 | * A memory region could potentially cover multiple VMAs, and any holes | |
1878 | * between them, so iterate over all of them to find out if we can map | |
1879 | * any of them right now. | |
1880 | * | |
1881 | * +--------------------------------------------+ | |
1882 | * +---------------+----------------+ +----------------+ | |
1883 | * | : VMA 1 | VMA 2 | | VMA 3 : | | |
1884 | * +---------------+----------------+ +----------------+ | |
1885 | * | memory region | | |
1886 | * +--------------------------------------------+ | |
1887 | */ | |
1888 | do { | |
1889 | struct vm_area_struct *vma = find_vma(current->mm, hva); | |
1890 | hva_t vm_start, vm_end; | |
1891 | ||
1892 | if (!vma || vma->vm_start >= reg_end) | |
1893 | break; | |
1894 | ||
1895 | /* | |
1896 | * Mapping a read-only VMA is only allowed if the | |
1897 | * memory region is configured as read-only. | |
1898 | */ | |
1899 | if (writable && !(vma->vm_flags & VM_WRITE)) { | |
1900 | ret = -EPERM; | |
1901 | break; | |
1902 | } | |
1903 | ||
1904 | /* | |
1905 | * Take the intersection of this VMA with the memory region | |
1906 | */ | |
1907 | vm_start = max(hva, vma->vm_start); | |
1908 | vm_end = min(reg_end, vma->vm_end); | |
1909 | ||
1910 | if (vma->vm_flags & VM_PFNMAP) { | |
1911 | gpa_t gpa = mem->guest_phys_addr + | |
1912 | (vm_start - mem->userspace_addr); | |
ca09f02f MM |
1913 | phys_addr_t pa; |
1914 | ||
1915 | pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; | |
1916 | pa += vm_start - vma->vm_start; | |
8eef9123 | 1917 | |
15a49a44 | 1918 | /* IO region dirty page logging not allowed */ |
72f31048 MZ |
1919 | if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { |
1920 | ret = -EINVAL; | |
1921 | goto out; | |
1922 | } | |
15a49a44 | 1923 | |
8eef9123 AB |
1924 | ret = kvm_phys_addr_ioremap(kvm, gpa, pa, |
1925 | vm_end - vm_start, | |
1926 | writable); | |
1927 | if (ret) | |
1928 | break; | |
1929 | } | |
1930 | hva = vm_end; | |
1931 | } while (hva < reg_end); | |
1932 | ||
15a49a44 | 1933 | if (change == KVM_MR_FLAGS_ONLY) |
72f31048 | 1934 | goto out; |
15a49a44 | 1935 | |
849260c7 AB |
1936 | spin_lock(&kvm->mmu_lock); |
1937 | if (ret) | |
8eef9123 | 1938 | unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size); |
849260c7 AB |
1939 | else |
1940 | stage2_flush_memslot(kvm, memslot); | |
1941 | spin_unlock(&kvm->mmu_lock); | |
72f31048 MZ |
1942 | out: |
1943 | up_read(¤t->mm->mmap_sem); | |
8eef9123 | 1944 | return ret; |
df6ce24f EA |
1945 | } |
1946 | ||
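/*
 * For reference, the *mem argument received above originates from the
 * KVM_SET_USER_MEMORY_REGION ioctl. A minimal, illustrative userspace
 * sketch follows (the slot number, addresses and sizes are made up;
 * vm_fd is assumed to be an open VM file descriptor):
 */
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int add_ram_slot(int vm_fd, __u64 guest_phys_addr, __u64 size)
{
	/* back the guest RAM with anonymous host memory */
	void *host_mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct kvm_userspace_memory_region region = {
		.slot		 = 0,	/* illustrative slot number */
		.flags		 = 0,	/* or KVM_MEM_LOG_DIRTY_PAGES / KVM_MEM_READONLY */
		.guest_phys_addr = guest_phys_addr,
		.memory_size	 = size,
		.userspace_addr	 = (unsigned long)host_mem,
	};

	if (host_mem == MAP_FAILED)
		return -1;

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}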
1947 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, | |
1948 | struct kvm_memory_slot *dont) | |
1949 | { | |
1950 | } | |
1951 | ||
1952 | int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, | |
1953 | unsigned long npages) | |
1954 | { | |
1955 | return 0; | |
1956 | } | |
1957 | ||
15f46015 | 1958 | void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) |
df6ce24f EA |
1959 | { |
1960 | } | |
1961 | ||
1962 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | |
1963 | { | |
293f2936 | 1964 | kvm_free_stage2_pgd(kvm); |
df6ce24f EA |
1965 | } |
1966 | ||
1967 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | |
1968 | struct kvm_memory_slot *slot) | |
1969 | { | |
8eef9123 AB |
1970 | gpa_t gpa = slot->base_gfn << PAGE_SHIFT; |
1971 | phys_addr_t size = slot->npages << PAGE_SHIFT; | |
1972 | ||
1973 | spin_lock(&kvm->mmu_lock); | |
1974 | unmap_stage2_range(kvm, gpa, size); | |
1975 | spin_unlock(&kvm->mmu_lock); | |
df6ce24f | 1976 | } |
3c1e7165 MZ |
1977 | |
1978 | /* | |
1979 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). | |
1980 | * | |
1981 | * Main problems: | |
1982 | * - S/W ops are local to a CPU (not broadcast) | |
1983 | * - We have line migration behind our back (speculation) | |
1984 | * - System caches don't support S/W at all (damn!) | |
1985 | * | |
1986 | * In the face of the above, the best we can do is to try and convert | |
1987 | * S/W ops to VA ops. Because the guest is not allowed to infer the | |
1988 | * S/W to PA mapping, it can only use S/W to nuke the whole cache, | |
1989 | * which is a rather good thing for us. | |
1990 | * | |
1991 | * Also, it is only used when turning caches on/off ("The expected | |
1992 | * usage of the cache maintenance instructions that operate by set/way | |
1993 | * is associated with the cache maintenance instructions associated | |
1994 | * with the powerdown and powerup of caches, if this is required by | |
1995 | * the implementation."). | |
1996 | * | |
1997 | * We use the following policy: | |
1998 | * | |
1999 | * - If we trap a S/W operation, we enable VM trapping to detect | |
2000 | * caches being turned on/off, and do a full clean. | |
2001 | * | |
2002 | * - We flush the caches both when they are turned on and when they are turned off. | |
2003 | * | |
2004 | * - Once the caches are enabled, we stop trapping VM ops. | |
2005 | */ | |
2006 | void kvm_set_way_flush(struct kvm_vcpu *vcpu) | |
2007 | { | |
2008 | unsigned long hcr = vcpu_get_hcr(vcpu); | |
2009 | ||
2010 | /* | |
2011 | * If this is the first time we do a S/W operation | |
2012 | * (i.e. HCR_TVM not set) flush the whole memory, and set the | |
2013 | * VM trapping. | |
2014 | * | |
2015 | * Otherwise, rely on the VM trapping to wait for the MMU + | |
2016 | * Caches to be turned off. At that point, we'll be able to | |
2017 | * clean the caches again. | |
2018 | */ | |
2019 | if (!(hcr & HCR_TVM)) { | |
2020 | trace_kvm_set_way_flush(*vcpu_pc(vcpu), | |
2021 | vcpu_has_cache_enabled(vcpu)); | |
2022 | stage2_flush_vm(vcpu->kvm); | |
2023 | vcpu_set_hcr(vcpu, hcr | HCR_TVM); | |
2024 | } | |
2025 | } | |
2026 | ||
2027 | void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled) | |
2028 | { | |
2029 | bool now_enabled = vcpu_has_cache_enabled(vcpu); | |
2030 | ||
2031 | /* | |
2032 | * If switching the MMU+caches on, need to invalidate the caches. | |
2033 | * If switching it off, need to clean the caches. | |
2034 | * Clean + invalidate does the trick always. | |
2035 | */ | |
2036 | if (now_enabled != was_enabled) | |
2037 | stage2_flush_vm(vcpu->kvm); | |
2038 | ||
2039 | /* Caches are now on, stop trapping VM ops (until a S/W op) */ | |
2040 | if (now_enabled) | |
2041 | vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM); | |
2042 | ||
2043 | trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled); | |
2044 | } |