/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <asm/mmu_context.h>

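/* Current guest kernel ASID for this CPU, with the version bits masked off. */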
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

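/* Current guest user ASID for this CPU, with the version bits masked off. */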
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

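/**
 * kvm_mips_map_page() - Fault in a host page for a guest frame number.
 * @kvm:	KVM pointer.
 * @gfn:	Guest frame number to map.
 *
 * If @gfn is not yet backed in kvm->arch.guest_pmap, pin a host page with
 * gfn_to_pfn() under SRCU read protection and record the resulting pfn.
 *
 * Returns:	0 on success, -EFAULT if no host page could be obtained.
 */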
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_noslot_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

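/*
 * Map the even/odd guest page pair containing badvaddr and install the
 * corresponding even/odd entry pair in the host TLB under the guest
 * kernel ASID.
 */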
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
	pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}

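/*
 * Mirror a guest TLB entry into the host TLB: translate the guest frame
 * numbers to host pfns, preserving the D and V attribute bits of each
 * guest entrylo.
 */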
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	gfn_t gfn0, gfn1;
	long tlb_lo[2];
	int ret;

	tlb_lo[0] = tlb->tlb_lo[0];
	tlb_lo[1] = tlb->tlb_lo[1];

	/*
	 * The commpage address must not be mapped to anything else if the
	 * guest TLB contains entries nearby, or commpage accesses will break.
	 */
	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
	      VPN2_MASK & (PAGE_MASK << 1)))
		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;

	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
	if (gfn0 >= kvm->arch.guest_pmap_npages ||
	    gfn1 >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
			__func__, gfn0, gfn1, tlb->tlb_hi);
		kvm_mips_dump_guest_tlbs(vcpu);
		return -1;
	}

	if (kvm_mips_map_page(kvm, gfn0) < 0)
		return -1;

	if (kvm_mips_map_page(kvm, gfn1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn0];
	pfn1 = kvm->arch.guest_pmap[gfn1];

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb_lo[0] & ENTRYLO_D) |
		(tlb_lo[0] & ENTRYLO_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb_lo[1] & ENTRYLO_D) |
		(tlb_lo[1] & ENTRYLO_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}

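/*
 * Allocate a fresh ASID for mm on this CPU, starting a new ASID cycle
 * (with a full TLB flush) when the ASID space wraps around.
 */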
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
			vcpu->arch.guest_kernel_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
	}

	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
			vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we were preempted while the guest was executing, reload
		 * the preempted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, the preempted ASID is no
		 * longer valid; set it to what it should be based on the mode
		 * of the guest (kernel/user).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}

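/*
 * Fetch the guest instruction at opc, faulting the corresponding host
 * mapping in first if necessary. Returns KVM_INVALID_INST if the address
 * cannot be resolved.
 */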
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	unsigned long va = (unsigned long)opc;
	void *vaddr;
	u32 inst;
	int index;

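	/*
	 * TLB-mapped guest segments (useg and kseg2/3) may need the guest TLB
	 * entry mirrored into the host TLB before *opc can be dereferenced;
	 * unmapped KSEG0 is instead read through a temporary kernel mapping
	 * of the underlying host page.
	 */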
	if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, va);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = va & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
				KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index])) {
				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, opc, index, vcpu,
					read_c0_entryhi());
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
		vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
		vaddr += paddr & ~PAGE_MASK;
		inst = *(u32 *)vaddr;
		kunmap_atomic(vaddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}