/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>

/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define HYP_PAGE_OFFSET_MASK	UL(~0)
#define HYP_PAGE_OFFSET		PAGE_OFFSET
#define KERN_TO_HYP(kva)	(kva)
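
/*
 * Illustrative sketch (not part of this header): because KERN_TO_HYP is
 * the identity here, a kernel object mapped into HYP is visible at the
 * same virtual address from both worlds. A hypothetical caller:
 *
 *	static struct my_state state;			// hypothetical object
 *	err = create_hyp_mappings(&state, &state + 1);
 *	// HYP code can now dereference KERN_TO_HYP(&state) == &state
 */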

/*
 * Our virtual mapping for the boot-time MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the vectors
 * page, where no kernel data will ever be shared with HYP.
 */
#define TRAMPOLINE_VA		UL(CONFIG_VECTORS_BASE)

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
 */
#define KVM_MMU_CACHE_MIN_PAGES	2
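
/*
 * Worked example (a reading of the worst case, not normative): with the
 * stage-2 pgd pre-allocated, mapping a single guest page may require
 * allocating one PMD table and one PTE table, i.e. two pages, which is
 * why the MMU memory cache is topped up to at least this many pages
 * before a table walk that may need to install entries.
 */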

#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
	*pmd = new_pmd;
	flush_pmd_entry(pmd);
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	*pte = new_pte;
	/*
	 * flush_pmd_entry just takes a void pointer and cleans the necessary
	 * cache entries, so we can reuse the function for ptes.
	 */
	flush_pmd_entry(pte);
}

static inline void kvm_clean_pgd(pgd_t *pgd)
{
	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
}

static inline void kvm_clean_pmd(pmd_t *pmd)
{
	clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
}

static inline void kvm_clean_pmd_entry(pmd_t *pmd)
{
	clean_pmd_entry(pmd);
}

static inline void kvm_clean_pte(pte_t *pte)
{
	clean_pte_table(pte);
}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= L_PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= L_PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
}
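
/*
 * Why "(val & RDWR) == RDONLY" works (assuming the usual LPAE stage-2
 * encoding, where L_PTE_S2_RDONLY is HAP bits 0b01 << 6 and
 * L_PTE_S2_RDWR is 0b11 << 6): masking with RDWR extracts both
 * permission bits, so the comparison holds exactly when only the read
 * bit is set.
 */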

/* Open coded p*d_addr_end that can deal with 64bit addresses */
#define kvm_pgd_addr_end(addr, end)					\
({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;		\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#define kvm_pud_addr_end(addr,end)		(end)

#define kvm_pmd_addr_end(addr, end)					\
({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;		\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
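
/*
 * Note on the form above (an interpretation from the surrounding
 * definitions, not from the source): the arithmetic is done in u64
 * because stage-2 walks operate on IPAs that can exceed 32 bits (a
 * 40-bit IPA space with LPAE), so the generic p*d_addr_end() helpers,
 * which use unsigned long, would truncate. Comparing
 * "__boundary - 1 < (end) - 1" rather than "__boundary < end" keeps
 * the macro correct even if either value wraps to 0.
 */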

#define kvm_pgd_index(addr)		pgd_index(addr)

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}
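
/*
 * How the emptiness test works (descriptive note): KVM takes a
 * reference on a page-table page for each valid entry it installs and
 * drops it when an entry is cleared, so a count of exactly 1 (the
 * allocation's own reference) means the table no longer maps anything
 * and can be freed.
 */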

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) (0)

#define KVM_PREALLOC_LEVEL	0

static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	return kvm->arch.pgd;
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}
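
/*
 * Worked numbers (assuming the usual 32-bit KVM configuration of a
 * 40-bit IPA space with LPAE 8-byte descriptors): the first stage-2
 * level resolves IPA bits [39:30], so PTRS_PER_S2_PGD = 1 << 10 and
 * kvm_get_hwpgd_size() = 1024 * 8 = 8KiB, i.e. two concatenated 4KiB
 * tables fed to the VTTBR.
 */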

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
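
/*
 * SCTLR.M (bit 0) is the MMU enable and SCTLR.C (bit 2) is the data
 * cache enable, hence the 0b101 mask and value below: the caches only
 * count as "on" when both bits are set.
 */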
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
	 * (or another VM) may have used the same page as this guest, and we
	 * read incorrect data from the icache. If we're using a PIPT cache,
	 * we can invalidate just that page, but if we are using a VIPT cache
	 * we need to invalidate the entire icache - damn shame - as written
	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and don't
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 *
	 * We need to do this through a kernel mapping (using the
	 * user-space mapping has proved to be the wrong
	 * solution). For that, we need to kmap one page at a time,
	 * and iterate over the range.
	 */

	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;

	VM_BUG_ON(size & ~PAGE_MASK);

	if (!need_flush && !icache_is_pipt())
		goto vipt_cache;

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		if (need_flush)
			kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		if (icache_is_pipt())
			__cpuc_coherent_user_range((unsigned long)va,
						   (unsigned long)va + PAGE_SIZE);

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}

 vipt_cache:
	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}
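
/*
 * Summary of the cases above (a reading of the code, not a quote from
 * the ARM ARM): the d-cache is cleaned to PoC only when the guest runs
 * with caches off or the IPA range is uncached; the i-cache is
 * invalidated per page for PIPT, globally for any VIPT flavour, and
 * not at all for ASID/VMID-tagged VIVT.
 */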

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	void *va = kmap_atomic(pte_page(pte));

	kvm_flush_dcache_to_poc(va, PAGE_SIZE);

	kunmap_atomic(va);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	unsigned long size = PMD_SIZE;
	kvm_pfn_t pfn = pmd_pfn(pmd);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		pfn++;
		size -= PAGE_SIZE;

		kunmap_atomic(va);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}

#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return false;
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start) { }
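
/* The VTTBR VMID field is 8 bits wide on ARMv7, hence the constant. */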
static inline unsigned int kvm_get_vmid_bits(void)
{
	return 8;
}

#endif	/* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */