/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, 39-bit VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */

#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
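
/*
 * Worked example (illustrative only; values assume VA_BITS == 48):
 *
 *	HYP_PAGE_OFFSET_HIGH_MASK == 0x0000ffffffffffff
 *	HYP_PAGE_OFFSET_LOW_MASK  == 0x00007fffffffffff
 *
 * Masking a kernel VA (whose top bits are all ones) with the high mask
 * keeps only the low VA_BITS bits; the low mask additionally clears
 * bit (VA_BITS - 1), pushing the HYP alias into the bottom half of the
 * VA_BITS space when the idmap occupies the top half.
 */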

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * This generates the following sequences:
 * - High mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		nop
 * - Low mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
 * - VHE:
 *		nop
 *		nop
 *
 * The "low mask" version works because the mask is a strict subset of
 * the "high mask", hence performing the first mask for nothing.
 * Should be completely invisible on any viable CPU.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
alternative_else_nop_endif
alternative_if ARM64_HYP_OFFSET_LOW
	and	\reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
alternative_else_nop_endif
.endm
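
/*
 * Illustrative use (not part of the original macro definition): HYP
 * assembly that receives a kernel VA in a register converts it in
 * place before dereferencing it at EL2, e.g.
 *
 *	kern_hyp_va	x1	// x1: kernel VA in, HYP VA out
 */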

#else

#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
	asm volatile(ALTERNATIVE("nop",
				 "and %0, %0, %1",
				 ARM64_HYP_OFFSET_LOW)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
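
/*
 * Illustrative use of kern_hyp_va() (sketch; "kernel_ptr" is a
 * placeholder, not a symbol defined in this header): convert a kernel
 * pointer before it is consumed by code running at EL2.
 *
 *	void *hyp_ptr = kern_hyp_va(kernel_ptr);
 */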

/*
 * Obtain the PC-relative address of a kernel symbol
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
 * constant pool or something similar. This works well for HYP, as an
 * absolute VA is guaranteed to be wrong. Only use this if trying to
 * obtain the address of a symbol (i.e. not something you obtained by
 * following a pointer).
 */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : "S" (&s));				\
		addr;							\
	})
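
/*
 * Illustrative use (sketch; "__some_hyp_symbol" is a placeholder):
 * from HYP code, derive a symbol's address PC-relatively instead of
 * using its absolute kernel VA, which is meaningless at EL2:
 *
 *	unsigned long *p = hyp_symbol_addr(__some_hyp_symbol);
 */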

/*
 * We currently only support a 40bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
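
/*
 * With KVM_PHYS_SHIFT == 40 this gives a 1TB IPA space:
 * KVM_PHYS_SIZE == 1UL << 40 == 0x10000000000 and
 * KVM_PHYS_MASK == 0xffffffffff.
 */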

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t old_pteval, pteval;

	pteval = READ_ONCE(pte_val(*pte));
	do {
		old_pteval = pteval;
		pteval &= ~PTE_S2_RDWR;
		pteval |= PTE_S2_RDONLY;
		/*
		 * cmpxchg_relaxed() returns the value that was in memory;
		 * if it differs from old_pteval the update raced, so retry
		 * with the freshly observed value.
		 */
		pteval = cmpxchg_relaxed(&pte_val(*pte), old_pteval, pteval);
	} while (pteval != old_pteval);
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both the MMU (SCTLR_EL1.M, bit 0) and the D-cache (SCTLR_EL1.C, bit 2) must be on. */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	kvm_flush_dcache_to_poc(va, size);

	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__pa_symbol(x)

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}
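
/*
 * Worked example (illustrative): with VA_BITS == 39 and the HYP idmap
 * text located just above the 512GB boundary (e.g. at 0x8000000000),
 * idmap_idx == 0x8000000000 >> 39 == 1, so the ID mapping occupies the
 * second extended-idmap entry while entry 0 keeps the runtime HYP map.
 */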

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	/* ID_AA64MMFR1_EL1.VMIDBits == 0b0010 means 16-bit VMIDs are implemented. */
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu.h>

static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kvm_ksym_ref(__kvm_hyp_vector);

	if (data->fn) {
		vect = __bp_harden_hyp_vecs_start +
		       data->hyp_vectors_slot * SZ_2K;

		if (!has_vhe())
			vect = lm_alias(vect);
	}

	return vect;
}

static inline int kvm_map_vectors(void)
{
	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
				   PAGE_HYP_EXEC);
}

#else
static inline void *kvm_get_hyp_vector(void)
{
	return kvm_ksym_ref(__kvm_hyp_vector);
}

static inline int kvm_map_vectors(void)
{
	return 0;
}
#endif

#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

static inline int hyp_map_aux_data(void)
{
	int cpu, err;

	for_each_possible_cpu(cpu) {
		u64 *ptr;

		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
		if (err)
			return err;
	}
	return 0;
}
#else
static inline int hyp_map_aux_data(void)
{
	return 0;
}
#endif

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */