/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>
#include <asm/system_misc.h>
static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static pgd_t *merged_hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static unsigned long io_map_base;
#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))

#define KVM_S2PTE_FLAG_IS_IOMAP		(1UL << 0)
#define KVM_S2_FLAG_LOGGING_ACTIVE	(1UL << 1)

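/*
 * Returns true when dirty page logging is enabled for this memslot: it has
 * a dirty_bitmap allocated and is not a read-only slot.
 */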
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}

/**
 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm:	pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}

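/*
 * Invalidate the stage-2 TLB entries covering the given IPA for this VM.
 */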
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

/*
 * D-Cache management functions. They take the page table entries by
 * value, as they are flushing the cache using the kernel mapping (or
 * kmap on 32bit).
 */
static void kvm_flush_dcache_pte(pte_t pte)
{
	__kvm_flush_dcache_pte(pte);
}

static void kvm_flush_dcache_pmd(pmd_t pmd)
{
	__kvm_flush_dcache_pmd(pmd);
}

static void kvm_flush_dcache_pud(pud_t pud)
{
	__kvm_flush_dcache_pud(pud);
}

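/*
 * Any pfn without a struct page (i.e. outside the kernel's memory map) is
 * treated as a device page: it gets device memory attributes at stage 2 and
 * the d-cache helpers above are never applied to it.
 */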
static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_valid(pfn);
}

/**
 * stage2_dissolve_pmd() - clear and flush huge PMD entry
 * @kvm:	pointer to kvm structure.
 * @addr:	IPA
 * @pmd:	pmd pointer for IPA
 *
 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all
 * pages in the range dirty.
 */
static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
{
	if (!pmd_thp_or_huge(*pmd))
		return;

	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	put_page(virt_to_page(pmd));
}

/**
 * stage2_dissolve_pud() - clear and flush huge PUD entry
 * @kvm:	pointer to kvm structure.
 * @addr:	IPA
 * @pud:	pud pointer for IPA
 *
 * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs. Marks all
 * pages in the range dirty.
 */
static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
{
	if (!stage2_pud_huge(kvm, *pudp))
		return;

	stage2_pud_clear(kvm, pudp);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	put_page(virt_to_page(pudp));
}

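/*
 * Small per-caller cache of pre-allocated page-table pages. It is topped up
 * before taking the MMU lock so that page-table allocations never have to
 * sleep while the lock is held.
 */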
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
	pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
	stage2_pgd_clear(kvm, pgd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	stage2_pud_free(kvm, pud_table);
	put_page(virt_to_page(pgd));
}

static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
	pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
	VM_BUG_ON(stage2_pud_huge(kvm, *pud));
	stage2_pud_clear(kvm, pud);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	stage2_pmd_free(kvm, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	VM_BUG_ON(pmd_thp_or_huge(*pmd));
	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

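/*
 * Page-table setters. WRITE_ONCE() guarantees a single, ordered store of the
 * entry, since the hardware page-table walker may be reading it concurrently.
 */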
static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
{
	WRITE_ONCE(*ptep, new_pte);
	dsb(ishst);
}

static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
{
	WRITE_ONCE(*pmdp, new_pmd);
	dsb(ishst);
}

static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
{
	kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
}

static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
{
	WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
	dsb(ishst);
}

static inline void kvm_pgd_populate(pgd_t *pgdp, pud_t *pudp)
{
	WRITE_ONCE(*pgdp, kvm_mk_pgd(pudp));
	dsb(ishst);
}

/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM.  However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
 * the IO subsystem will never hit in the cache.
 *
 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
 * we then fully enforce cacheability of RAM, no matter what the guest
 * does.
 */
static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			pte_t old_pte = *pte;

			kvm_set_pte(pte, __pte(0));
			kvm_tlb_flush_vmid_ipa(kvm, addr);

			/* No need to invalidate the cache for device mappings */
			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
				kvm_flush_dcache_pte(old_pte);

			put_page(virt_to_page(pte));
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (stage2_pte_table_empty(kvm, start_pte))
		clear_stage2_pmd_entry(kvm, pmd, start_addr);
}

static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
	do {
		next = stage2_pmd_addr_end(kvm, addr, end);
		if (!pmd_none(*pmd)) {
			if (pmd_thp_or_huge(*pmd)) {
				pmd_t old_pmd = *pmd;

				pmd_clear(pmd);
				kvm_tlb_flush_vmid_ipa(kvm, addr);

				kvm_flush_dcache_pmd(old_pmd);

				put_page(virt_to_page(pmd));
			} else {
				unmap_stage2_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);

	if (stage2_pmd_table_empty(kvm, start_pmd))
		clear_stage2_pud_entry(kvm, pud, start_addr);
}

static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
			      phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pud_t *pud, *start_pud;

	start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
	do {
		next = stage2_pud_addr_end(kvm, addr, end);
		if (!stage2_pud_none(kvm, *pud)) {
			if (stage2_pud_huge(kvm, *pud)) {
				pud_t old_pud = *pud;

				stage2_pud_clear(kvm, pud);
				kvm_tlb_flush_vmid_ipa(kvm, addr);
				kvm_flush_dcache_pud(old_pud);
				put_page(virt_to_page(pud));
			} else {
				unmap_stage2_pmds(kvm, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);

	if (stage2_pud_table_empty(kvm, start_pud))
		clear_stage2_pgd_entry(kvm, pgd, start_addr);
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	assert_spin_locked(&kvm->mmu_lock);
	WARN_ON(size & ~PAGE_MASK);

	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
	do {
		/*
		 * Make sure the page table is still active, as another thread
		 * could have possibly freed the page table, while we released
		 * the lock.
		 */
		if (!READ_ONCE(kvm->arch.pgd))
			break;
		next = stage2_pgd_addr_end(kvm, addr, end);
		if (!stage2_pgd_none(kvm, *pgd))
			unmap_stage2_puds(kvm, pgd, addr, next);
		/*
		 * If the range is too large, release the kvm->mmu_lock
		 * to prevent starvation and lockup detector warnings.
		 */
		if (next != end)
			cond_resched_lock(&kvm->mmu_lock);
	} while (pgd++, addr = next, addr != end);
}

static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
			kvm_flush_dcache_pte(*pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = stage2_pmd_offset(kvm, pud, addr);
	do {
		next = stage2_pmd_addr_end(kvm, addr, end);
		if (!pmd_none(*pmd)) {
			if (pmd_thp_or_huge(*pmd))
				kvm_flush_dcache_pmd(*pmd);
			else
				stage2_flush_ptes(kvm, pmd, addr, next);
		}
	} while (pmd++, addr = next, addr != end);
}

static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
			      phys_addr_t addr, phys_addr_t end)
{
	pud_t *pud;
	phys_addr_t next;

	pud = stage2_pud_offset(kvm, pgd, addr);
	do {
		next = stage2_pud_addr_end(kvm, addr, end);
		if (!stage2_pud_none(kvm, *pud)) {
			if (stage2_pud_huge(kvm, *pud))
				kvm_flush_dcache_pud(*pud);
			else
				stage2_flush_pmds(kvm, pud, addr, next);
		}
	} while (pud++, addr = next, addr != end);
}

static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
	phys_addr_t next;
	pgd_t *pgd;

	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
	do {
		next = stage2_pgd_addr_end(kvm, addr, end);
		if (!stage2_pgd_none(kvm, *pgd))
			stage2_flush_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
static void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void clear_hyp_pgd_entry(pgd_t *pgd)
{
	pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
	pgd_clear(pgd);
	pud_free(NULL, pud_table);
	put_page(virt_to_page(pgd));
}

static void clear_hyp_pud_entry(pud_t *pud)
{
	pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
	VM_BUG_ON(pud_huge(*pud));
	pud_clear(pud);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_hyp_pmd_entry(pmd_t *pmd)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	VM_BUG_ON(pmd_thp_or_huge(*pmd));
	pmd_clear(pmd);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			kvm_set_pte(pte, __pte(0));
			put_page(virt_to_page(pte));
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (hyp_pte_table_empty(start_pte))
		clear_hyp_pmd_entry(pmd);
}

static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* Hyp doesn't use huge pmds */
		if (!pmd_none(*pmd))
			unmap_hyp_ptes(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	if (hyp_pmd_table_empty(start_pmd))
		clear_hyp_pud_entry(pud);
}

static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next;
	pud_t *pud, *start_pud;

	start_pud = pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		/* Hyp doesn't use huge puds */
		if (!pud_none(*pud))
			unmap_hyp_pmds(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	if (hyp_pud_table_empty(start_pud))
		clear_hyp_pgd_entry(pgd);
}

static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd)
{
	return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
}

static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
			      phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	/*
	 * We don't unmap anything from HYP, except at the hyp tear down.
	 * Hence, we don't have to invalidate the TLBs here.
	 */
	pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
	do {
		next = pgd_addr_end(addr, end);
		if (!pgd_none(*pgd))
			unmap_hyp_puds(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
{
	__unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
}

static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
{
	__unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the idmap range.
 *
 * boot_hyp_pgd should only map the idmap range, and is only used in
 * the extended idmap case.
 */
void free_hyp_pgds(void)
{
	pgd_t *id_pgd;

	mutex_lock(&kvm_hyp_pgd_mutex);

	id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;

	if (id_pgd) {
		/* In case we never called hyp_mmu_init() */
		if (!io_map_base)
			io_map_base = hyp_idmap_start;
		unmap_hyp_idmap_range(id_pgd, io_map_base,
				      hyp_idmap_start + PAGE_SIZE - io_map_base);
	}

	if (boot_hyp_pgd) {
		free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd) {
		unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
				(uintptr_t)high_memory - PAGE_OFFSET);

		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
		hyp_pgd = NULL;
	}
	if (merged_hyp_pgd) {
		clear_page(merged_hyp_pgd);
		free_page((unsigned long)merged_hyp_pgd);
		merged_hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, kvm_pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			kvm_pmd_populate(pmd, pte);
			get_page(virt_to_page(pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int ret;

	addr = start;
	do {
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				return -ENOMEM;
			}
			kvm_pud_populate(pud, pmd);
			get_page(virt_to_page(pud));
		}

		next = pud_addr_end(addr, end);
		ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (ret)
			return ret;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);

		if (pgd_none(*pgd)) {
			pud = pud_alloc_one(NULL, addr);
			if (!pud) {
				kvm_err("Cannot allocate Hyp pud\n");
				err = -ENOMEM;
				goto out;
			}
			kvm_pgd_populate(pgd, pud);
			get_page(virt_to_page(pgd));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 * @prot:	The protection to be applied to this range
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to, pgprot_t prot)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = kern_hyp_va((unsigned long)from);
	unsigned long end = kern_hyp_va((unsigned long)to);
	int err;

	if (is_kernel_in_hyp_mode())
		return 0;

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
					    virt_addr, virt_addr + PAGE_SIZE,
					    __phys_to_pfn(phys_addr),
					    prot);
		if (err)
			return err;
	}

	return 0;
}

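/*
 * Allocate a private HYP VA range below the idmap page and map @size bytes
 * of @phys_addr into it, returning the resulting HYP VA in *haddr.
 */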
static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
					unsigned long *haddr, pgprot_t prot)
{
	pgd_t *pgd = hyp_pgd;
	unsigned long base;
	int ret = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);

	/*
	 * This assumes that we have enough space below the idmap
	 * page to allocate our VAs. If not, the check below will
	 * kick. A potential alternative would be to detect that
	 * overflow and switch to an allocation above the idmap.
	 *
	 * The allocated size is always a multiple of PAGE_SIZE.
	 */
	size = PAGE_ALIGN(size + offset_in_page(phys_addr));
	base = io_map_base - size;

	/*
	 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
	 * allocating the new area, as it would indicate we've
	 * overflowed the idmap/IO address range.
	 */
	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
		ret = -ENOMEM;
	else
		io_map_base = base;

	mutex_unlock(&kvm_hyp_pgd_mutex);

	if (ret)
		goto out;

	if (__kvm_cpu_uses_extended_idmap())
		pgd = boot_hyp_pgd;

	ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
				    base, base + size,
				    __phys_to_pfn(phys_addr), prot);
	if (ret)
		goto out;

	*haddr = base + offset_in_page(phys_addr);

out:
	return ret;
}

/**
 * create_hyp_io_mappings - Map IO into both kernel and HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @kaddr:	Kernel VA for this mapping
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr)
{
	unsigned long addr;
	int ret;

	*kaddr = ioremap(phys_addr, size);
	if (!*kaddr)
		return -ENOMEM;

	if (is_kernel_in_hyp_mode()) {
		*haddr = *kaddr;
		return 0;
	}

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_DEVICE);
	if (ret) {
		iounmap(*kaddr);
		*kaddr = NULL;
		*haddr = NULL;
		return ret;
	}

	*haddr = (void __iomem *)addr;
	return 0;
}

/**
 * create_hyp_exec_mappings - Map an executable range into HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr)
{
	unsigned long addr;
	int ret;

	BUG_ON(is_kernel_in_hyp_mode());

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_EXEC);
	if (ret) {
		*haddr = NULL;
		return ret;
	}

	*haddr = (void *)addr;
	return 0;
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates only the stage-2 HW PGD level table(s) (can support either full
 * 40-bit input addresses or limited to 32-bit input addresses). Clears the
 * allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	/* Allocate the HW PGD, making sure that each page gets its own refcount */
	pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
	if (!pgd)
		return -ENOMEM;

	kvm->arch.pgd = pgd;
	return 0;
}

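/*
 * stage2_unmap_memslot - remove the stage-2 mappings backing one memslot,
 * skipping VM_PFNMAP regions so device mappings are left in place.
 */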
static void stage2_unmap_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	hva_t hva = memslot->userspace_addr;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = PAGE_SIZE * memslot->npages;
	hva_t reg_end = hva + size;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we should
	 * unmap any of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (!(vma->vm_flags & VM_PFNMAP)) {
			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
		}
		hva = vm_end;
	} while (hva < reg_end);
}

/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	down_read(&current->mm->mmap_sem);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_unmap_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	up_read(&current->mm->mmap_sem);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	void *pgd = NULL;

	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.pgd) {
		unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
		pgd = READ_ONCE(kvm->arch.pgd);
		kvm->arch.pgd = NULL;
	}
	spin_unlock(&kvm->mmu_lock);

	/* Free the HW pgd, one page at a time */
	if (pgd)
		free_pages_exact(pgd, stage2_pgd_size(kvm));
}

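/*
 * Walk (and, when a cache is supplied, populate) the stage-2 tables down to
 * the PUD level for @addr, returning a pointer to the PUD entry.
 */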
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
	if (stage2_pgd_none(kvm, *pgd)) {
		if (!cache)
			return NULL;
		pud = mmu_memory_cache_alloc(cache);
		stage2_pgd_populate(kvm, pgd, pud);
		get_page(virt_to_page(pgd));
	}

	return stage2_pud_offset(kvm, pgd, addr);
}

static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = stage2_get_pud(kvm, cache, addr);
	if (!pud || stage2_pud_huge(kvm, *pud))
		return NULL;

	if (stage2_pud_none(kvm, *pud)) {
		if (!cache)
			return NULL;
		pmd = mmu_memory_cache_alloc(cache);
		stage2_pud_populate(kvm, pud, pmd);
		get_page(virt_to_page(pud));
	}

	return stage2_pmd_offset(kvm, pud, addr);
}

static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
			       *cache, phys_addr_t addr, const pmd_t *new_pmd)
{
	pmd_t *pmd, old_pmd;

	pmd = stage2_get_pmd(kvm, cache, addr);
	VM_BUG_ON(!pmd);

	old_pmd = *pmd;
	if (pmd_present(old_pmd)) {
		/*
		 * Multiple vcpus faulting on the same PMD entry, can
		 * lead to them sequentially updating the PMD with the
		 * same value. Following the break-before-make
		 * (pmd_clear() followed by tlb_flush()) process can
		 * hinder forward progress due to refaults generated
		 * on missing translations.
		 *
		 * Skip updating the page table if the entry is
		 * unchanged.
		 */
		if (pmd_val(old_pmd) == pmd_val(*new_pmd))
			return 0;

		/*
		 * Mapping in huge pages should only happen through a
		 * fault.  If a page is merged into a transparent huge
		 * page, the individual subpages of that huge page
		 * should be unmapped through MMU notifiers before we
		 * get here.
		 *
		 * Merging of CompoundPages is not supported; they
		 * should be split first, then unmapped, merged, and
		 * mapped back in on demand.
		 */
		VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));

		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		get_page(virt_to_page(pmd));
	}

	kvm_set_pmd(pmd, *new_pmd);
	return 0;
}

static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			       phys_addr_t addr, const pud_t *new_pudp)
{
	pud_t *pudp, old_pud;

	pudp = stage2_get_pud(kvm, cache, addr);
	VM_BUG_ON(!pudp);

	old_pud = *pudp;

	/*
	 * A large number of vcpus faulting on the same stage 2 entry,
	 * can lead to a refault due to the
	 * stage2_pud_clear()/tlb_flush(). Skip updating the page
	 * tables if there is no change.
	 */
	if (pud_val(old_pud) == pud_val(*new_pudp))
		return 0;

	if (stage2_pud_present(kvm, old_pud)) {
		stage2_pud_clear(kvm, pudp);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		get_page(virt_to_page(pudp));
	}

	kvm_set_pud(pudp, *new_pudp);
	return 0;
}

/*
 * stage2_get_leaf_entry - walk the stage2 VM page tables and return
 * true if a valid and present leaf-entry is found. A pointer to the
 * leaf-entry is returned in the appropriate level variable - pudpp,
 * pmdpp, ptepp.
 */
static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
				  pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
{
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	*pudpp = NULL;
	*pmdpp = NULL;
	*ptepp = NULL;

	pudp = stage2_get_pud(kvm, NULL, addr);
	if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
		return false;

	if (stage2_pud_huge(kvm, *pudp)) {
		*pudpp = pudp;
		return true;
	}

	pmdp = stage2_pmd_offset(kvm, pudp, addr);
	if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
		return false;

	if (pmd_thp_or_huge(*pmdp)) {
		*pmdpp = pmdp;
		return true;
	}

	ptep = pte_offset_kernel(pmdp, addr);
	if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
		return false;

	*ptepp = ptep;
	return true;
}

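/*
 * Return true if the IPA is already mapped executable at any level, so an
 * existing mapping's execute permission can be preserved on permission faults.
 */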
static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
{
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	bool found;

	found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep);
	if (!found)
		return false;

	if (pudp)
		return kvm_s2pud_exec(pudp);
	else if (pmdp)
		return kvm_s2pmd_exec(pmdp);
	else
		return kvm_s2pte_exec(ptep);
}

static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte,
			  unsigned long flags)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;
	bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
	bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;

	VM_BUG_ON(logging_active && !cache);

	/* Create stage-2 page table mapping - Levels 0 and 1 */
	pud = stage2_get_pud(kvm, cache, addr);
	if (!pud) {
		/*
		 * Ignore calls from kvm_set_spte_hva for unallocated
		 * address ranges.
		 */
		return 0;
	}

	/*
	 * While dirty page logging - dissolve huge PUD, then continue
	 * on to allocate page.
	 */
	if (logging_active)
		stage2_dissolve_pud(kvm, addr, pud);

	if (stage2_pud_none(kvm, *pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		stage2_pud_populate(kvm, pud, pmd);
		get_page(virt_to_page(pud));
	}

	pmd = stage2_pmd_offset(kvm, pud, addr);
	if (!pmd) {
		/*
		 * Ignore calls from kvm_set_spte_hva for unallocated
		 * address ranges.
		 */
		return 0;
	}

	/*
	 * While dirty page logging - dissolve huge PMD, then continue on to
	 * allocate page.
	 */
	if (logging_active)
		stage2_dissolve_pmd(kvm, addr, pmd);

	/* Create stage-2 page mappings - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_pmd_populate(pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	if (pte_present(old_pte)) {
		/* Skip page table update if there is no change */
		if (pte_val(old_pte) == pte_val(*new_pte))
			return 0;

		kvm_set_pte(pte, __pte(0));
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		get_page(virt_to_page(pte));
	}

	kvm_set_pte(pte, *new_pte);
	return 0;
}

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static int stage2_ptep_test_and_clear_young(pte_t *pte)
{
	if (pte_young(*pte)) {
		*pte = pte_mkold(*pte);
		return 1;
	}
	return 0;
}
#else
static int stage2_ptep_test_and_clear_young(pte_t *pte)
{
	return __ptep_test_and_clear_young(pte);
}
#endif

static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
{
	return stage2_ptep_test_and_clear_young((pte_t *)pmd);
}

static int stage2_pudp_test_and_clear_young(pud_t *pud)
{
	return stage2_ptep_test_and_clear_young((pte_t *)pud);
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);

		if (writable)
			pte = kvm_s2pte_mkwrite(pte);

		ret = mmu_topup_memory_cache(&cache,
					     kvm_mmu_cache_min_pages(kvm),
					     KVM_NR_MEM_OBJS);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte,
				     KVM_S2PTE_FLAG_IS_IOMAP);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}

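/*
 * Check whether the faulting pfn sits inside a THP compound page; if so,
 * move *ipap and *pfnp back to the head of the huge page so the fault can be
 * resolved with a stage-2 PMD block mapping.
 */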
static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
{
	kvm_pfn_t pfn = *pfnp;
	gfn_t gfn = *ipap >> PAGE_SHIFT;
	struct page *page = pfn_to_page(pfn);

	/*
	 * PageTransCompoundMap() returns true for THP and
	 * hugetlbfs. Make sure the adjustment is done only for THP
	 * pages.
	 */
	if (!PageHuge(page) && PageTransCompoundMap(page)) {
		unsigned long mask;
		/*
		 * The address we faulted on is backed by a transparent huge
		 * page.  However, because we map the compound huge page and
		 * not the individual tail page, we need to transfer the
		 * refcount to the head page.  We have to be careful that the
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
		 * We are sure this doesn't happen, because mmu_notifier_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages, specifically
		 * before being able to call __split_huge_page_refcount().
		 *
		 * We can therefore safely transfer the refcount from PG_tail
		 * to PG_head and switch the pfn from a tail page to the head
		 * page accordingly.
		 */
		mask = PTRS_PER_PMD - 1;
		VM_BUG_ON((gfn & mask) != (pfn & mask));
		if (pfn & mask) {
			*ipap &= PMD_MASK;
			kvm_release_pfn_clean(pfn);
			pfn &= ~mask;
			kvm_get_pfn(pfn);
			*pfnp = pfn;
		}

		return true;
	}

	return false;
}

static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

/**
 * stage2_wp_ptes - write protect PMD range
 * @pmd:	pointer to pmd entry
 * @addr:	range start address
 * @end:	range end address
 */
static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			if (!kvm_s2pte_readonly(pte))
				kvm_set_s2pte_readonly(pte);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

/**
 * stage2_wp_pmds - write protect PUD range
 * @kvm:	kvm instance for the VM
 * @pud:	pointer to pud entry
 * @addr:	range start address
 * @end:	range end address
 */
static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
			   phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = stage2_pmd_offset(kvm, pud, addr);

	do {
		next = stage2_pmd_addr_end(kvm, addr, end);
		if (!pmd_none(*pmd)) {
			if (pmd_thp_or_huge(*pmd)) {
				if (!kvm_s2pmd_readonly(pmd))
					kvm_set_s2pmd_readonly(pmd);
			} else {
				stage2_wp_ptes(pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}

/**
 * stage2_wp_puds - write protect PGD range
 * @pgd:	pointer to pgd entry
 * @addr:	range start address
 * @end:	range end address
 *
 * Process PUD entries; a huge PUD is write protected in place.
 */
static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
			   phys_addr_t addr, phys_addr_t end)
{
	pud_t *pud;
	phys_addr_t next;

	pud = stage2_pud_offset(kvm, pgd, addr);
	do {
		next = stage2_pud_addr_end(kvm, addr, end);
		if (!stage2_pud_none(kvm, *pud)) {
			if (stage2_pud_huge(kvm, *pud)) {
				if (!kvm_s2pud_readonly(pud))
					kvm_set_s2pud_readonly(pud);
			} else {
				stage2_wp_pmds(kvm, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);
}

/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @kvm:	The KVM pointer
 * @addr:	Start address of range
 * @end:	End address of range
 */
static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	pgd_t *pgd;
	phys_addr_t next;

	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
	do {
		/*
		 * Release kvm_mmu_lock periodically if the memory region is
		 * large. Otherwise, we may see kernel panics with
		 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
		 * CONFIG_LOCKDEP. Additionally, holding the lock too long
		 * will also starve other vCPUs. We have to also make sure
		 * that the page tables are not freed while we released
		 * the lock.
		 */
		cond_resched_lock(&kvm->mmu_lock);
		if (!READ_ONCE(kvm->arch.pgd))
			break;
		next = stage2_pgd_addr_end(kvm, addr, end);
		if (stage2_pgd_present(kvm, *pgd))
			stage2_wp_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

/**
 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
 * @kvm:	The KVM pointer
 * @slot:	The memory slot to write protect
 *
 * Called to start logging dirty pages after memory region
 * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
 * all present PUD, PMD and PTEs are write protected in the memory region.
 * Afterwards read of dirty page log can be called.
 *
 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);
}

/**
 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated PTEs. Caller
 * must acquire kvm_mmu_lock.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	stage2_wp_range(kvm, start, end);
}

/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * dirty pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	__clean_dcache_guest_page(pfn, size);
}

static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	__invalidate_icache_guest_page(pfn, size);
}

static void kvm_send_hwpoison_signal(unsigned long address,
				     struct vm_area_struct *vma)
{
	short lsb;

	if (is_vm_hugetlb_page(vma))
		lsb = huge_page_shift(hstate_vma(vma));
	else
		lsb = PAGE_SHIFT;

	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}

static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
					       unsigned long hva)
{
	gpa_t gpa_start, gpa_end;
	hva_t uaddr_start, uaddr_end;
	size_t size;

	size = memslot->npages * PAGE_SIZE;

	gpa_start = memslot->base_gfn << PAGE_SHIFT;
	gpa_end = gpa_start + size;

	uaddr_start = memslot->userspace_addr;
	uaddr_end = uaddr_start + size;

	/*
	 * Pages belonging to memslots that don't have the same alignment
	 * within a PMD for userspace and IPA cannot be mapped with stage-2
	 * PMD entries, because we'll end up mapping the wrong pages.
	 *
	 * Consider a layout like the following:
	 *
	 *    memslot->userspace_addr:
	 *    +-----+--------------------+--------------------+---+
	 *    |abcde|fgh  Stage-1 PMD    |    Stage-1 PMD   tv|xyz|
	 *    +-----+--------------------+--------------------+---+
	 *
	 *    memslot->base_gfn << PAGE_SIZE:
	 *      +---+--------------------+--------------------+-----+
	 *      |abc|def  Stage-2 PMD    |    Stage-2 PMD     |tvxyz|
	 *      +---+--------------------+--------------------+-----+
	 *
	 * If we create those stage-2 PMDs, we'll end up with this incorrect
	 * mapping:
	 *   d -> f
	 *   e -> g
	 *   f -> h
	 */
	if ((gpa_start & ~S2_PMD_MASK) != (uaddr_start & ~S2_PMD_MASK))
		return false;

	/*
	 * Next, let's make sure we're not trying to map anything not covered
	 * by the memslot. This means we have to prohibit PMD size mappings
	 * for the beginning and end of a non-PMD aligned and non-PMD sized
	 * memory slot (illustrated by the head and tail parts of the
	 * userspace view above containing pages 'abcde' and 'xyz',
	 * respectively).
	 *
	 * Note that it doesn't matter if we do the check using the
	 * userspace_addr or the base_gfn, as both are equally aligned (per
	 * the check above) and equally sized.
	 */
	return (hva & S2_PMD_MASK) >= uaddr_start &&
	       (hva & S2_PMD_MASK) + S2_PMD_SIZE <= uaddr_end;
}

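/*
 * user_mem_abort - handle a stage-2 fault on memory backed by a memslot:
 * resolve the hva, pin the page, and install a PTE/PMD/PUD mapping of the
 * appropriate size and permissions.
 */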
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot, unsigned long hva,
			  unsigned long fault_status)
{
	int ret;
	bool write_fault, writable, force_pte = false;
	bool exec_fault, needs_exec;
	unsigned long mmu_seq;
	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	kvm_pfn_t pfn;
	pgprot_t mem_type = PAGE_S2;
	bool logging_active = memslot_is_logging(memslot);
	unsigned long vma_pagesize, flags = 0;

	write_fault = kvm_is_write_fault(vcpu);
	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
	VM_BUG_ON(write_fault && exec_fault);

	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	if (!fault_supports_stage2_pmd_mappings(memslot, hva))
		force_pte = true;

	/* Let's check if we will get back a huge page backed by hugetlbfs */
	down_read(&current->mm->mmap_sem);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		up_read(&current->mm->mmap_sem);
		return -EFAULT;
	}

	vma_pagesize = vma_kernel_pagesize(vma);
	/*
	 * PUD level may not exist for a VM but PMD is guaranteed to
	 * exist.
	 */
	if ((vma_pagesize == PMD_SIZE ||
	     (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) &&
	    !force_pte) {
		gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
	}
	up_read(&current->mm->mmap_sem);

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
				     KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to gets unmapped before we have a
	 * chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(hva, vma);
		return 0;
	}
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	if (kvm_is_device_pfn(pfn)) {
		mem_type = PAGE_S2_DEVICE;
		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
	} else if (logging_active) {
		/*
		 * Faults on pages in a memslot with logging enabled
		 * should not be mapped with huge pages (it introduces churn
		 * and performance degradation), so force a pte mapping.
		 */
		force_pte = true;
		flags |= KVM_S2_FLAG_LOGGING_ACTIVE;

		/*
		 * Only actually map the page as writable if this was a write
		 * fault.
		 */
		if (!write_fault)
			writable = false;
	}

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	if (vma_pagesize == PAGE_SIZE && !force_pte) {
		/*
		 * Only PMD_SIZE transparent hugepages(THP) are
		 * currently supported. This code will need to be
		 * updated to support other THP sizes.
		 */
		if (transparent_hugepage_adjust(&pfn, &fault_ipa))
			vma_pagesize = PMD_SIZE;
	}

	if (writable)
		kvm_set_pfn_dirty(pfn);

	if (fault_status != FSC_PERM)
		clean_dcache_guest_page(pfn, vma_pagesize);

	if (exec_fault)
		invalidate_icache_guest_page(pfn, vma_pagesize);

	/*
	 * If we took an execution fault we have made the
	 * icache/dcache coherent above and should now let the s2
	 * mapping be executable.
	 *
	 * Write faults (!exec_fault && FSC_PERM) are orthogonal to
	 * execute permissions, and we preserve whatever we have.
	 */
	needs_exec = exec_fault ||
		(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));

	if (vma_pagesize == PUD_SIZE) {
		pud_t new_pud = kvm_pfn_pud(pfn, mem_type);

		new_pud = kvm_pud_mkhuge(new_pud);
		if (writable)
			new_pud = kvm_s2pud_mkwrite(new_pud);

		if (needs_exec)
			new_pud = kvm_s2pud_mkexec(new_pud);

		ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
	} else if (vma_pagesize == PMD_SIZE) {
		pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);

		new_pmd = kvm_pmd_mkhuge(new_pmd);

		if (writable)
			new_pmd = kvm_s2pmd_mkwrite(new_pmd);

		if (needs_exec)
			new_pmd = kvm_s2pmd_mkexec(new_pmd);

		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
	} else {
		pte_t new_pte = kvm_pfn_pte(pfn, mem_type);

		if (writable) {
			new_pte = kvm_s2pte_mkwrite(new_pte);
			mark_page_dirty(kvm, gfn);
		}

		if (needs_exec)
			new_pte = kvm_s2pte_mkexec(new_pte);

		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_set_pfn_accessed(pfn);
	kvm_release_pfn_clean(pfn);
	return ret;
}

/*
 * Resolve the access fault by making the page young again.
 * Note that because the faulting entry is guaranteed not to be
 * cached in the TLB, we don't need to invalidate anything.
 * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
 * so there is no need for atomic (pte|pmd)_mkyoung operations.
 */
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	kvm_pfn_t pfn;
	bool pfn_valid = false;

	trace_kvm_access_fault(fault_ipa);

	spin_lock(&vcpu->kvm->mmu_lock);

	if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
		goto out;

	if (pud) {		/* HugeTLB */
		*pud = kvm_s2pud_mkyoung(*pud);
		pfn = kvm_pud_pfn(*pud);
		pfn_valid = true;
	} else	if (pmd) {	/* THP, HugeTLB */
		*pmd = pmd_mkyoung(*pmd);
		pfn = pmd_pfn(*pmd);
		pfn_valid = true;
	} else {
		*pte = pte_mkyoung(*pte);	/* Just a page... */
		pfn = pte_pfn(*pte);
		pfn_valid = true;
	}

out:
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (pfn_valid)
		kvm_set_pfn_accessed(pfn);
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either the
 * guest simply needs more memory and we must allocate an appropriate page or it
 * can mean that the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	unsigned long hva;
	bool is_iabt, write_fault, writable;
	gfn_t gfn;
	int ret, idx;

	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);

	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);

	/* Synchronous External Abort? */
	if (kvm_vcpu_dabt_isextabt(vcpu)) {
		/*
		 * For RAS the host kernel may handle this abort.
		 * There is no need to pass the error into the guest.
		 */
		if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
			return 1;

		if (unlikely(!is_iabt)) {
			kvm_inject_vabt(vcpu);
			return 1;
		}
	}

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
	    fault_status != FSC_ACCESS) {
		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu),
			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
			(unsigned long)kvm_vcpu_get_hsr(vcpu));
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
	write_fault = kvm_is_write_fault(vcpu);
	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * Check for a cache maintenance operation. Since we
		 * ended-up here, we know it is outside of any memory
		 * slot. But we can't find out if that is for a device,
		 * or if the guest is just being stupid. The only thing
		 * we know for sure is that this range cannot be cached.
		 *
		 * So let's assume that the guest is just being
		 * cautious, and skip the instruction.
		 */
		if (kvm_vcpu_dabt_is_cm(vcpu)) {
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	/* Userspace should not be able to register out-of-bounds IPAs */
	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));

	if (fault_status == FSC_ACCESS) {
		handle_access_fault(vcpu, fault_ipa);
		ret = 1;
		goto out_unlock;
	}

	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

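/*
 * handle_hva_to_gpa - iterate over every memslot that overlaps an hva range
 * and invoke @handler on the corresponding guest physical range.
 */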
static int handle_hva_to_gpa(struct kvm *kvm,
			     unsigned long start,
			     unsigned long end,
			     int (*handler)(struct kvm *kvm,
					    gpa_t gpa, u64 size,
					    void *data),
			     void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int ret = 0;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gpa;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
	}

	return ret;
}

static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	unmap_stage2_range(kvm, gpa, size);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	pte_t *pte = (pte_t *)data;

	WARN_ON(size != PAGE_SIZE);
	/*
	 * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
	 * flag clear because MMU notifiers will have unmapped a huge PMD before
	 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
	 * therefore stage2_set_pte() never needs to clear out a huge PMD
	 * through this calling path.
	 */
	stage2_set_pte(kvm, NULL, gpa, pte, 0);
	return 0;
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	kvm_pfn_t pfn = pte_pfn(pte);
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_set_spte_hva(hva);

	/*
	 * We've moved a page around, probably through CoW, so let's treat it
	 * just like a translation fault and clean the cache to the PoC.
	 */
	clean_dcache_guest_page(pfn, PAGE_SIZE);
	stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);

	return 0;
}

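/*
 * MMU-notifier ageing helpers: test and optionally clear the access flag on
 * whichever leaf entry (PUD, PMD or PTE) maps the given guest address.
 */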
static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
	if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
		return 0;

	if (pud)
		return stage2_pudp_test_and_clear_young(pud);
	else if (pmd)
		return stage2_pmdp_test_and_clear_young(pmd);
	else
		return stage2_ptep_test_and_clear_young(pte);
}

static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
	if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
		return 0;

	if (pud)
		return kvm_s2pud_young(*pud);
	else if (pmd)
		return pmd_young(*pmd);
	else
		return pte_young(*pte);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;
	trace_kvm_age_hva(start, end);
	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.pgd)
		return 0;
	trace_kvm_test_age_hva(hva);
	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
	if (__kvm_cpu_uses_extended_idmap())
		return virt_to_phys(merged_hyp_pgd);
	else
		return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

static int kvm_map_idmap_text(pgd_t *pgd)
{
	int err;

	/* Create the idmap in the boot page tables */
	err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
				    hyp_idmap_start, hyp_idmap_end,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP_EXEC);
	if (err)
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);

	return err;
}

int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
	hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
	hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);

	/*
	 * We rely on the linker script to ensure at build time that the HYP
	 * init code does not cross a page boundary.
	 */
	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);

	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
	kvm_debug("HYP VA range: %lx:%lx\n",
		  kern_hyp_va(PAGE_OFFSET),
		  kern_hyp_va((unsigned long)high_memory - 1));

	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
	    hyp_idmap_start <  kern_hyp_va((unsigned long)high_memory - 1) &&
	    hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
		/*
		 * The idmap page is intersecting with the VA space,
		 * it is not safe to continue further.
		 */
		kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
		err = -EINVAL;
		goto out;
	}

	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
	if (!hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	if (__kvm_cpu_uses_extended_idmap()) {
		boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
							 hyp_pgd_order);
		if (!boot_hyp_pgd) {
			kvm_err("Hyp boot PGD not allocated\n");
			err = -ENOMEM;
			goto out;
		}

		err = kvm_map_idmap_text(boot_hyp_pgd);
		if (err)
			goto out;

		merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		if (!merged_hyp_pgd) {
			kvm_err("Failed to allocate extra HYP pgd\n");
			err = -ENOMEM;
			goto out;
		}
		__kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
				    hyp_idmap_start);
	} else {
		err = kvm_map_idmap_text(hyp_pgd);
		if (err)
			goto out;
	}

	io_map_base = hyp_idmap_start;
	return 0;
out:
	free_hyp_pgds();
	return err;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/*
	 * At this point memslot has been committed and there is an
	 * allocated dirty_bitmap[], dirty pages will be tracked while the
	 * memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
		kvm_mmu_wp_memory_region(kvm, mem->slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	hva_t hva = mem->userspace_addr;
	hva_t reg_end = hva + mem->memory_size;
	bool writable = !(mem->flags & KVM_MEM_READONLY);
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
	    change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the IPA
	 * space addressable by the KVM guest IPA space.
	 */
	if (memslot->base_gfn + memslot->npages >=
	    (kvm_phys_size(kvm) >> PAGE_SHIFT))
		return -EFAULT;

	down_read(&current->mm->mmap_sem);
	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we can map
	 * any of them right now.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Mapping a read-only VMA is only allowed if the
		 * memory region is configured as read-only.
		 */
		if (writable && !(vma->vm_flags & VM_WRITE)) {
			ret = -EPERM;
			break;
		}

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (vma->vm_flags & VM_PFNMAP) {
			gpa_t gpa = mem->guest_phys_addr +
				    (vm_start - mem->userspace_addr);
			phys_addr_t pa;

			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
			pa += vm_start - vma->vm_start;

			/* IO region dirty page logging not allowed */
			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				goto out;
			}

			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
						    vm_end - vm_start,
						    writable);
			if (ret)
				break;
		}
		hva = vm_end;
	} while (hva < reg_end);

	if (change == KVM_MR_FLAGS_ONLY)
		goto out;

	spin_lock(&kvm->mmu_lock);
	if (ret)
		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
	else
		stage2_flush_memslot(kvm, memslot);
	spin_unlock(&kvm->mmu_lock);
out:
	up_read(&current->mm->mmap_sem);
	return ret;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_free_stage2_pgd(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	unmap_stage2_range(kvm, gpa, size);
	spin_unlock(&kvm->mmu_lock);
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 *
 * Main problems:
 * - S/W ops are local to a CPU (not broadcast)
 * - We have line migration behind our back (speculation)
 * - System caches don't support S/W at all (damn!)
 *
 * In the face of the above, the best we can do is to try and convert
 * S/W ops to VA ops. Because the guest is not allowed to infer the
 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
 * which is a rather good thing for us.
 *
 * Also, it is only used when turning caches on/off ("The expected
 * usage of the cache maintenance instructions that operate by set/way
 * is associated with the cache maintenance instructions associated
 * with the powerdown and powerup of caches, if this is required by
 * the implementation.").
 *
 * We use the following policy:
 *
 * - If we trap a S/W operation, we enable VM trapping to detect
 *   caches being turned on/off, and do a full clean.
 *
 * - We flush the caches on both caches being turned on and off.
 *
 * - Once the caches are enabled, we stop trapping VM ops.
 */
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
	unsigned long hcr = *vcpu_hcr(vcpu);

	/*
	 * If this is the first time we do a S/W operation
	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
	 * VM trapping.
	 *
	 * Otherwise, rely on the VM trapping to wait for the MMU +
	 * Caches to be turned off. At that point, we'll be able to
	 * clean the caches again.
	 */
	if (!(hcr & HCR_TVM)) {
		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
					vcpu_has_cache_enabled(vcpu));
		stage2_flush_vm(vcpu->kvm);
		*vcpu_hcr(vcpu) = hcr | HCR_TVM;
	}
}

void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/*
	 * If switching the MMU+caches on, need to invalidate the caches.
	 * If switching it off, need to clean the caches.
	 * Clean + invalidate does the trick always.
	 */
	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	/* Caches are now on, stop trapping VM ops (until a S/W op) */
	if (now_enabled)
		*vcpu_hcr(vcpu) &= ~HCR_TVM;

	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}