/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 *
 * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
 * macros (the entire kernel runs at EL2).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
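
/*
 * Illustrative example (added commentary, assuming VA_BITS == 48):
 * HYP_PAGE_OFFSET_MASK is 0x0000ffffffffffff, so a kernel VA such as
 * 0xffff800000100000 masks down to the HYP VA 0x0000800000100000,
 * i.e. the same address with the sign-extension bits cleared.
 */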

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
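
/*
 * For instance, with 4KiB pages and VA_BITS == 48 (an assumed
 * configuration, for illustration only), TRAMPOLINE_VA evaluates to
 * 0x0000fffffffff000: the last page of the HYP VA range.
 */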

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
alternative_else
	nop
alternative_endif
.endm

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
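
/*
 * Worked example (added commentary, assuming VA_BITS == 48, where
 * PAGE_OFFSET is 0xffff800000000000 and hence HYP_PAGE_OFFSET is
 * 0x0000800000000000): KERN_TO_HYP(0xffff800000200000) yields
 * 0x0000800000200000, matching what the kern_hyp_va assembly macro
 * computes by masking.
 */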

/*
 * We currently only support a 40bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
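
/* A 40bit IPA gives each guest 1TB (1 << 40 bytes) of guest physical space. */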

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}
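
/*
 * Added note: these are no-ops on arm64, where the page-table walker
 * is cache coherent and writing an entry needs no D-cache maintenance.
 * The stubs keep the interface shared with the 32-bit ARM port, where
 * the equivalent helpers do real cache cleaning.
 */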

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	kvm_set_s2pte_readonly\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_S2_RDWR\n"
	"	orr	%0, %0, %4		// set PTE_S2_RDONLY\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
	: "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
}
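
/*
 * Added note: the ldxr/stxr loop above is an atomic read-modify-write
 * of the PTE, presumably so the RDWR -> RDONLY transition cannot lose
 * concurrent hardware updates of the stage-2 access/dirty information.
 */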

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}
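
/*
 * Added note: the unmap paths take a get_page() reference on a table
 * page for every entry installed in it, so a page_count() of exactly 1
 * appears to indicate a table with no remaining entries.
 */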

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
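
/*
 * Bits 0 and 2 of SCTLR_EL1 are M (MMU enable) and C (data cache
 * enable); 0b101 selects exactly those two, so the vCPU only counts as
 * having its caches enabled when both the MMU and the D-cache are on.
 */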

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}
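
/*
 * Added summary: after the merge, entry 0 of the extended table points
 * at the runtime HYP tables and entry (hyp_idmap_start >> VA_BITS)
 * points at the boot/idmap tables, so one TTBR can cover both.
 */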

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}
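
/*
 * A VMIDBits field value of 2 (0b0010) in ID_AA64MMFR1_EL1 advertises
 * the ARMv8.1 16-bit VMID extension (VMID16); any other value means
 * the architectural minimum of 8 VMID bits.
 */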

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */