/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}
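
/*
 * Note on the cache helpers above: page table pages cannot be allocated
 * while holding kvm->mmu_lock (a spinlock), so callers top up a memory
 * cache with GFP_KERNEL allocations beforehand and then pull the
 * pre-allocated pages out of it from atomic context. A rough sketch of
 * the intended call pattern (mirroring user_mem_abort() below, not a
 * verbatim quote) is:
 *
 *	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
 *	if (ret)
 *		return ret;
 *	spin_lock(&kvm->mmu_lock);
 *	stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
 *	spin_unlock(&kvm->mmu_lock);
 */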
static void clear_pud_entry(pud_t *pud)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);
	pud_clear(pud);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}
static void clear_pmd_entry(pmd_t *pmd)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	pmd_clear(pmd);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}
static bool pmd_empty(pmd_t *pmd)
{
	struct page *pmd_page = virt_to_page(pmd);
	return page_count(pmd_page) == 1;
}
static void clear_pte_entry(pte_t *pte)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
	}
}
static bool pte_empty(pte_t *pte)
{
	struct page *pte_page = virt_to_page(pte);
	return page_count(pte_page) == 1;
}
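
/*
 * The clear_pte/pmd/pud_entry() and pte/pmd_empty() helpers above rely on
 * the refcount of the page backing each table: every entry installed in a
 * table takes a reference on that table's page (see the get_page() calls
 * in the mapping paths below) and clearing an entry drops one. A table
 * page whose page_count() is back to 1 therefore holds no entries and can
 * be freed, which is what lets unmap_range() walk back up the ladder.
 */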
static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 range;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		clear_pte_entry(pte);
		range = PAGE_SIZE;

		/* If we emptied the pte, walk back up the ladder */
		if (pte_empty(pte)) {
			clear_pmd_entry(pmd);
			range = PMD_SIZE;
			if (pmd_empty(pmd)) {
				clear_pud_entry(pud);
				range = PUD_SIZE;
			}
		}

		addr += range;
	}
}
/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		kfree(boot_hyp_pgd);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd)
		unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

	kfree(init_bounce_page);
	init_bounce_page = NULL;

	mutex_unlock(&kvm_hyp_pgd_mutex);
}
/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	free_boot_hyp_pgd();

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

		kfree(hyp_pgd);
		hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}
static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}
static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				err = -ENOMEM;
				goto out;
			}
			pud_populate(NULL, pud, pmd);
			get_page(virt_to_page(pud));
			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}
/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	unsigned long phys_addr = virt_to_phys(from);
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel memory mapping */
	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP);
}
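
/*
 * Illustrative use (a sketch; the symbol names are assumed from the KVM
 * init code and are not defined in this file): the Hyp-mode text is
 * duplicated into the Hyp tables so the world-switch code is reachable
 * once HVBAR points at it:
 *
 *	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
 *	if (err)
 *		goto out_free_mappings;
 */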
/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}
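
/*
 * Illustrative use (a sketch, with assumed names): a device region the Hyp
 * code itself must access, such as a GIC interface already ioremap()'d by
 * the host, can be made visible at its corresponding Hyp VA:
 *
 *	err = create_hyp_io_mappings(vctrl_base,
 *				     vctrl_base + resource_size(&vctrl_res),
 *				     vctrl_res.start);
 */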
/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;

	/* stage-2 pgd must be aligned to its size */
	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));

	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
	kvm->arch.pgd = pgd;

	return 0;
}
/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm->arch.pgd, start, size);
}
/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create 2nd stage page table mapping - Level 1 */
	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	pmd = pmd_offset(pud, addr);

	/* Create 2nd stage page table mapping - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}
/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
		kvm_set_s2pte_writable(&pte);

		ret = mmu_topup_memory_cache(&cache, 2, 2);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}
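
/*
 * Illustrative use (a sketch, with assumed names): device emulation code
 * can pre-install a passthrough MMIO window at the IPA the guest expects,
 * e.g. exposing a GIC virtual CPU interface page:
 *
 *	ret = kvm_phys_addr_ioremap(kvm, guest_gic_cpu_base,
 *				    host_gic_vcpu_res.start, PAGE_SIZE);
 */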
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  gfn_t gfn, struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	pte_t new_pte;
	pfn_t pfn;
	int ret;
	bool write_fault, writable;
	unsigned long mmu_seq;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to getting unmapped before we have
	 * a chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	new_pte = pfn_pte(pfn, PAGE_S2);
	coherent_icache_guest_page(vcpu->kvm, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	if (writable) {
		kvm_set_s2pte_writable(&new_pte);
		kvm_set_pfn_dirty(pfn);
	}
	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean either that
 * the guest simply needs more memory and we must allocate an appropriate
 * page, or that the guest tried to access I/O memory, which is emulated by
 * user space. The distinction is based on the IPA causing the fault and
 * whether this memory region has been registered as standard RAM by user
 * space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	bool is_iabt;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	fault_status = kvm_vcpu_trap_get_fault(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu), fault_status);
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		if (fault_status != FSC_FAULT) {
			kvm_err("Unsupported fault status on io memory: %#lx\n",
				fault_status);
			ret = -EFAULT;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	memslot = gfn_to_memslot(vcpu->kvm, gfn);

	ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}
static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
	kvm_tlb_flush_vmid_ipa(kvm, gpa);
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}
static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
phys_addr_t kvm_mmu_get_httbr(void)
{
	return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
	return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}
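
/*
 * These accessors feed the Hyp-mode bring-up path: the boot HTTBR and the
 * idmap'ed init vector are handed to code that still runs with the MMU
 * off, and the runtime HTTBR is switched to once the trampoline page
 * (mapped below in kvm_mmu_init()) has taken us out of the idmap.
 */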
int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);

	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
		/*
		 * Our init code is crossing a page boundary. Allocate
		 * a bounce page, copy the code over and use that.
		 */
		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
		phys_addr_t phys_base;

		init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!init_bounce_page) {
			kvm_err("Couldn't allocate HYP init bounce page\n");
			err = -ENOMEM;
			goto out;
		}

		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
		/*
		 * Warning: the code we just copied to the bounce page
		 * must be flushed to the point of coherency.
		 * Otherwise, the data may be sitting in L2, and HYP
		 * mode won't be able to observe it as it runs with
		 * caches off at that point.
		 */
		kvm_flush_dcache_to_poc(init_bounce_page, len);

		phys_base = virt_to_phys(init_bounce_page);
		hyp_idmap_vector += phys_base - hyp_idmap_start;
		hyp_idmap_start = phys_base;
		hyp_idmap_end = phys_base + len;

		kvm_info("Using HYP init bounce page @%lx\n",
			 (unsigned long)phys_base);
	}

	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!hyp_pgd || !boot_hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	/* Create the idmap in the boot page tables */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    hyp_idmap_start, hyp_idmap_end,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);
		goto out;
	}

	/* Map the very same page at the trampoline VA */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	/* Map the same page again into the runtime page tables */
	err = __create_hyp_mappings(hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
			TRAMPOLINE_VA);