/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100     /* for EI/VI mode */
#endif
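
/*
 * Per-VCPU exit counters. These are exported through the KVM debugfs
 * machinery; on a typical host they appear under /sys/kernel/debug/kvm
 * (the exact mount point is a property of the host configuration, not of
 * this file).
 */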
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
        { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
        { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
        { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
        { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
        { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
        { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
        { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
        { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
        { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
        { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
        { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
        { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
        { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
        { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
        { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
        { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
        { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
#ifdef CONFIG_KVM_MIPS_VZ
        { "vz_gpsi", VCPU_STAT(vz_gpsi_exits), KVM_STAT_VCPU },
        { "vz_gsfc", VCPU_STAT(vz_gsfc_exits), KVM_STAT_VCPU },
        { "vz_hc", VCPU_STAT(vz_hc_exits), KVM_STAT_VCPU },
        { "vz_grr", VCPU_STAT(vz_grr_exits), KVM_STAT_VCPU },
        { "vz_gva", VCPU_STAT(vz_gva_exits), KVM_STAT_VCPU },
        { "vz_ghfc", VCPU_STAT(vz_ghfc_exits), KVM_STAT_VCPU },
        { "vz_gpa", VCPU_STAT(vz_gpa_exits), KVM_STAT_VCPU },
        { "vz_resvd", VCPU_STAT(vz_resvd_exits), KVM_STAT_VCPU },
#endif
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
        { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
        { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
        { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
        {NULL}
};

bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
        kvm_trace_guest_mode_change = true;
        return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
        kvm_trace_guest_mode_change = false;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return !!(vcpu->arch.pending_exceptions);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}

int kvm_arch_hardware_enable(void)
{
        return kvm_mips_callbacks->hardware_enable();
}

void kvm_arch_hardware_disable(void)
{
        kvm_mips_callbacks->hardware_disable();
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        switch (type) {
#ifdef CONFIG_KVM_MIPS_VZ
        case KVM_VM_MIPS_VZ:
#else
        case KVM_VM_MIPS_TE:
#endif
                break;
        default:
                /* Unsupported KVM type */
                return -EINVAL;
        }

        /* Allocate page table to map GPA -> RPA */
        kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
        if (!kvm->arch.gpa_mm.pgd)
                return -ENOMEM;

        return 0;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
        return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_mips_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_arch_vcpu_free(vcpu);
        }

        mutex_lock(&kvm->lock);

        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        mutex_unlock(&kvm->lock);
}

static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
        /* It should always be safe to remove after flushing the whole range */
        WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
        pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_mips_free_vcpus(kvm);
        kvm_mips_free_gpa_pt(kvm);
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
                        unsigned long arg)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
        /* Flush whole GPA */
        kvm_mips_flush_gpa_pt(kvm, 0, ~0);

        /* Let implementation do the rest */
        kvm_mips_callbacks->flush_shadow_all(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        /*
         * The slot has been made invalid (ready for moving or deletion), so we
         * need to ensure that it can no longer be accessed by any guest VCPUs.
         */

        spin_lock(&kvm->mmu_lock);
        /* Flush slot from GPA */
        kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
                              slot->base_gfn + slot->npages - 1);
        /* Let implementation do the rest */
        kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
        spin_unlock(&kvm->mmu_lock);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   const struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   const struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change)
{
        int needs_flush;

        kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
                  __func__, kvm, mem->slot, mem->guest_phys_addr,
                  mem->memory_size, mem->userspace_addr);

        /*
         * If dirty page logging is enabled, write protect all pages in the slot
         * ready for dirty logging.
         *
         * There is no need to do this in any of the following cases:
         * CREATE:      No dirty mappings will already exist.
         * MOVE/DELETE: The old mappings will already have been cleaned up by
         *              kvm_arch_flush_shadow_memslot()
         */
        if (change == KVM_MR_FLAGS_ONLY &&
            (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
             new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
                spin_lock(&kvm->mmu_lock);
                /* Write protect GPA page table entries */
                needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
                                        new->base_gfn + new->npages - 1);
                /* Let implementation do the rest */
                if (needs_flush)
                        kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
                spin_unlock(&kvm->mmu_lock);
        }
}
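
/*
 * Print a generated handler as an assemblable listing. Note that pr_debug()
 * only produces output when DEBUG is defined for this file or dynamic debug
 * is enabled for it.
 */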
static inline void dump_handler(const char *symbol, void *start, void *end)
{
        u32 *p;

        pr_debug("LEAF(%s)\n", symbol);

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");

        for (p = start; p < (u32 *)end; ++p)
                pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

        pr_debug("\t.set\tpop\n");

        pr_debug("\tEND(%s)\n", symbol);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        int err, size;
        void *gebase, *p, *handler, *refill_start, *refill_end;
        int i;

        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

        if (!vcpu) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(vcpu, kvm, id);

        if (err)
                goto out_free_cpu;

        kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

        /*
         * Allocate space for host mode exception handlers that handle
         * guest mode exits
         */
        if (cpu_has_veic || cpu_has_vint)
                size = 0x200 + VECTORSPACING * 64;
        else
                size = 0x4000;

        gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

        if (!gebase) {
                err = -ENOMEM;
                goto out_uninit_cpu;
        }
        kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
                  ALIGN(size, PAGE_SIZE), gebase);

        /*
         * Check new ebase actually fits in CP0_EBase. The lack of a write gate
         * limits us to the low 512MB of physical address space. If the memory
         * we allocate is out of range, just give up now.
         */
        if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
                kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
                        gebase);
                err = -ENOMEM;
                goto out_free_gebase;
        }

        /* Save new ebase */
        vcpu->arch.guest_ebase = gebase;
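
        /*
         * Resulting layout of the gebase area (derived from the code below):
         *   gebase + 0x000 : TLB refill handler (at +0x080 for XTLB refill
         *                    on 64-bit VZ)
         *   gebase + 0x180 : general exception entry point
         *   gebase + 0x200 : vectored interrupt entries, VECTORSPACING apart
         *   gebase + 0x2000: common exit handler, followed by the generated
         *                    vcpu_run entry routine
         */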
        /* Build guest exception vectors dynamically in unmapped memory */
        handler = gebase + 0x2000;

        /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
        refill_start = gebase;
        if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
                refill_start += 0x080;
        refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

        /* General Exception Entry point */
        kvm_mips_build_exception(gebase + 0x180, handler);

        /* For vectored interrupts poke the exception code @ all offsets 0-7 */
        for (i = 0; i < 8; i++) {
                kvm_debug("L1 Vectored handler @ %p\n",
                          gebase + 0x200 + (i * VECTORSPACING));
                kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
                                         handler);
        }

        /* General exit handler */
        p = handler;
        p = kvm_mips_build_exit(p);

        /* Guest entry routine */
        vcpu->arch.vcpu_run = p;
        p = kvm_mips_build_vcpu_run(p);

        /* Dump the generated code */
        pr_debug("#include <asm/asm.h>\n");
        pr_debug("#include <asm/regdef.h>\n");
        pr_debug("\n");
        dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
        dump_handler("kvm_tlb_refill", refill_start, refill_end);
        dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
        dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

        /* Invalidate the icache for these ranges */
        flush_icache_range((unsigned long)gebase,
                           (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

        /*
         * Allocate comm page for guest kernel, a TLB entry will be reserved
         * for mapping GVA @ 0xFFFF8000 to this page
         */
        vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

        if (!vcpu->arch.kseg0_commpage) {
                err = -ENOMEM;
                goto out_free_gebase;
        }

        kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
        kvm_mips_commpage_init(vcpu);

        /* Init */
        vcpu->arch.last_sched_cpu = -1;
        vcpu->arch.last_exec_cpu = -1;

        return vcpu;

out_free_gebase:
        kfree(gebase);

out_uninit_cpu:
        kvm_vcpu_uninit(vcpu);

out_free_cpu:
        kfree(vcpu);

out:
        return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        hrtimer_cancel(&vcpu->arch.comparecount_timer);

        kvm_vcpu_uninit(vcpu);

        kvm_mips_dump_stats(vcpu);

        kvm_mmu_free_memory_caches(vcpu);
        kfree(vcpu->arch.guest_ebase);
        kfree(vcpu->arch.kseg0_commpage);
        kfree(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r = -EINTR;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvm_mips_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        }

        if (run->immediate_exit)
                goto out;

        lose_fpu(1);

        local_irq_disable();
        guest_enter_irqoff();
        trace_kvm_enter(vcpu);

        /*
         * Make sure the read of VCPU requests in vcpu_run() callback is not
         * reordered ahead of the write to vcpu->mode, or we could miss a TLB
         * flush request while the requester sees the VCPU as outside of guest
         * mode and not needing an IPI.
         */
        smp_store_mb(vcpu->mode, IN_GUEST_MODE);

        r = kvm_mips_callbacks->vcpu_run(run, vcpu);

        trace_kvm_out(vcpu);
        guest_exit_irqoff();
        local_irq_enable();

out:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                             struct kvm_mips_interrupt *irq)
{
        int intr = (int)irq->irq;
        struct kvm_vcpu *dvcpu = NULL;

        if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
                kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
                          (int)intr);

        if (irq->cpu == -1)
                dvcpu = vcpu;
        else
                dvcpu = vcpu->kvm->vcpus[irq->cpu];

        if (intr == 2 || intr == 3 || intr == 4) {
                kvm_mips_callbacks->queue_io_int(dvcpu, irq);
        } else if (intr == -2 || intr == -3 || intr == -4) {
                kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
        } else {
                kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
                        irq->cpu, irq->irq);
                return -EINVAL;
        }

        dvcpu->arch.wait = 0;

        if (swq_has_sleeper(&dvcpu->wq))
                swake_up(&dvcpu->wq);

        return 0;
}
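
/*
 * Illustrative only: from userspace an interrupt line is raised or lowered
 * with the KVM_INTERRUPT vcpu ioctl, a positive irq number asserting and the
 * matching negative number deasserting it. Sketch (hypothetical vcpu_fd,
 * error handling omitted):
 *
 *      struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };
 *      ioctl(vcpu_fd, KVM_INTERRUPT, &irq);    // assert IRQ 2 on this vcpu
 *      irq.irq = -2;
 *      ioctl(vcpu_fd, KVM_INTERRUPT, &irq);    // deassert it again
 */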

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
        KVM_REG_MIPS_R0,
        KVM_REG_MIPS_R1,
        KVM_REG_MIPS_R2,
        KVM_REG_MIPS_R3,
        KVM_REG_MIPS_R4,
        KVM_REG_MIPS_R5,
        KVM_REG_MIPS_R6,
        KVM_REG_MIPS_R7,
        KVM_REG_MIPS_R8,
        KVM_REG_MIPS_R9,
        KVM_REG_MIPS_R10,
        KVM_REG_MIPS_R11,
        KVM_REG_MIPS_R12,
        KVM_REG_MIPS_R13,
        KVM_REG_MIPS_R14,
        KVM_REG_MIPS_R15,
        KVM_REG_MIPS_R16,
        KVM_REG_MIPS_R17,
        KVM_REG_MIPS_R18,
        KVM_REG_MIPS_R19,
        KVM_REG_MIPS_R20,
        KVM_REG_MIPS_R21,
        KVM_REG_MIPS_R22,
        KVM_REG_MIPS_R23,
        KVM_REG_MIPS_R24,
        KVM_REG_MIPS_R25,
        KVM_REG_MIPS_R26,
        KVM_REG_MIPS_R27,
        KVM_REG_MIPS_R28,
        KVM_REG_MIPS_R29,
        KVM_REG_MIPS_R30,
        KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
        KVM_REG_MIPS_HI,
        KVM_REG_MIPS_LO,
#endif
        KVM_REG_MIPS_PC,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
        KVM_REG_MIPS_FCR_IR,
        KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
        KVM_REG_MIPS_MSA_IR,
        KVM_REG_MIPS_MSA_CSR,
};

static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
        unsigned long ret;

        ret = ARRAY_SIZE(kvm_mips_get_one_regs);
        if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
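                /* 32 single-precision FPRs + the 16 even-numbered doubles */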
                ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
                /* odd doubles */
                if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
                        ret += 16;
        }
        if (kvm_mips_guest_can_have_msa(&vcpu->arch))
                ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
        ret += kvm_mips_callbacks->num_regs(vcpu);

        return ret;
}

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
        u64 index;
        unsigned int i;

        if (copy_to_user(indices, kvm_mips_get_one_regs,
                         sizeof(kvm_mips_get_one_regs)))
                return -EFAULT;
        indices += ARRAY_SIZE(kvm_mips_get_one_regs);

        if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
                if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
                                 sizeof(kvm_mips_get_one_regs_fpu)))
                        return -EFAULT;
                indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

                for (i = 0; i < 32; ++i) {
                        index = KVM_REG_MIPS_FPR_32(i);
                        if (copy_to_user(indices, &index, sizeof(index)))
                                return -EFAULT;
                        ++indices;

                        /* skip odd doubles if no F64 */
                        if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
                                continue;

                        index = KVM_REG_MIPS_FPR_64(i);
                        if (copy_to_user(indices, &index, sizeof(index)))
                                return -EFAULT;
                        ++indices;
                }
        }

        if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
                if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
                                 sizeof(kvm_mips_get_one_regs_msa)))
                        return -EFAULT;
                indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

                for (i = 0; i < 32; ++i) {
                        index = KVM_REG_MIPS_VEC_128(i);
                        if (copy_to_user(indices, &index, sizeof(index)))
                                return -EFAULT;
                        ++indices;
                }
        }

        return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}

static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
                            const struct kvm_one_reg *reg)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
        int ret;
        s64 v;
        s64 vs[2];
        unsigned int idx;

        switch (reg->id) {
        /* General purpose registers */
        case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
                v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
                break;
#ifndef CONFIG_CPU_MIPSR6
        case KVM_REG_MIPS_HI:
                v = (long)vcpu->arch.hi;
                break;
        case KVM_REG_MIPS_LO:
                v = (long)vcpu->arch.lo;
                break;
#endif
        case KVM_REG_MIPS_PC:
                v = (long)vcpu->arch.pc;
                break;

        /* Floating point registers */
        case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                        return -EINVAL;
                idx = reg->id - KVM_REG_MIPS_FPR_32(0);
                /* Odd singles in top of even double when FR=0 */
                if (kvm_read_c0_guest_status(cop0) & ST0_FR)
                        v = get_fpr32(&fpu->fpr[idx], 0);
                else
                        v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
                break;
        case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                        return -EINVAL;
                idx = reg->id - KVM_REG_MIPS_FPR_64(0);
                /* Can't access odd doubles in FR=0 mode */
                if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
                        return -EINVAL;
                v = get_fpr64(&fpu->fpr[idx], 0);
                break;
        case KVM_REG_MIPS_FCR_IR:
                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                        return -EINVAL;
                v = boot_cpu_data.fpu_id;
                break;
        case KVM_REG_MIPS_FCR_CSR:
                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                        return -EINVAL;
                v = fpu->fcr31;
                break;

        /* MIPS SIMD Architecture (MSA) registers */
        case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
                if (!kvm_mips_guest_has_msa(&vcpu->arch))
                        return -EINVAL;
                /* Can't access MSA registers in FR=0 mode */
                if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
                        return -EINVAL;
                idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
                /* least significant byte first */
                vs[0] = get_fpr64(&fpu->fpr[idx], 0);
                vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
                /* most significant byte first */
                vs[0] = get_fpr64(&fpu->fpr[idx], 1);
                vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
                break;
        case KVM_REG_MIPS_MSA_IR:
                if (!kvm_mips_guest_has_msa(&vcpu->arch))
                        return -EINVAL;
                v = boot_cpu_data.msa_id;
                break;
        case KVM_REG_MIPS_MSA_CSR:
                if (!kvm_mips_guest_has_msa(&vcpu->arch))
                        return -EINVAL;
                v = fpu->msacsr;
                break;

        /* registers to be handled specially */
        default:
                ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
                if (ret)
                        return ret;
                break;
        }
        if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
                u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

                return put_user(v, uaddr64);
        } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
                u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
                u32 v32 = (u32)v;

                return put_user(v32, uaddr32);
        } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
                void __user *uaddr = (void __user *)(long)reg->addr;

                return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
        } else {
                return -EINVAL;
        }
}
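
/*
 * Illustrative only: userspace reads a register through the KVM_GET_ONE_REG
 * vcpu ioctl, passing a struct kvm_one_reg whose addr field points at a
 * buffer of the size encoded in the register id. Sketch (hypothetical
 * vcpu_fd, error handling omitted):
 *
 *      __u64 pc;
 *      struct kvm_one_reg one = {
 *              .id   = KVM_REG_MIPS_PC,
 *              .addr = (__u64)(unsigned long)&pc,
 *      };
 *      ioctl(vcpu_fd, KVM_GET_ONE_REG, &one);
 */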

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
                            const struct kvm_one_reg *reg)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
        s64 v;
        s64 vs[2];
        unsigned int idx;

        if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
                u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

                if (get_user(v, uaddr64) != 0)
                        return -EFAULT;
        } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
                u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
                s32 v32;

                if (get_user(v32, uaddr32) != 0)
                        return -EFAULT;
                v = (s64)v32;
        } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
                void __user *uaddr = (void __user *)(long)reg->addr;

                /*
                 * Don't return early on success: the value in vs[] still has
                 * to be written to the register by the switch below.
                 */
                if (copy_from_user(vs, uaddr, 16))
                        return -EFAULT;
        } else {
                return -EINVAL;
        }

        switch (reg->id) {
        /* General purpose registers */
        case KVM_REG_MIPS_R0:
                /* Silently ignore requests to set $0 */
                break;
        case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
                vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
                break;
#ifndef CONFIG_CPU_MIPSR6
        case KVM_REG_MIPS_HI:
                vcpu->arch.hi = v;
                break;
        case KVM_REG_MIPS_LO:
                vcpu->arch.lo = v;
                break;
#endif
        case KVM_REG_MIPS_PC:
                vcpu->arch.pc = v;
                break;

        /* Floating point registers */
        case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                        return -EINVAL;
                idx = reg->id - KVM_REG_MIPS_FPR_32(0);
                /* Odd singles in top of even double when FR=0 */
                if (kvm_read_c0_guest_status(cop0) & ST0_FR)
                        set_fpr32(&fpu->fpr[idx], 0, v);
                else
                        set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
                break;
        case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                        return -EINVAL;
                idx = reg->id - KVM_REG_MIPS_FPR_64(0);
                /* Can't access odd doubles in FR=0 mode */
                if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
                        return -EINVAL;
                set_fpr64(&fpu->fpr[idx], 0, v);
                break;
        case KVM_REG_MIPS_FCR_IR:
                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                        return -EINVAL;
                /* Read-only */
                break;
        case KVM_REG_MIPS_FCR_CSR:
                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                        return -EINVAL;
                fpu->fcr31 = v;
                break;

        /* MIPS SIMD Architecture (MSA) registers */
        case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
                if (!kvm_mips_guest_has_msa(&vcpu->arch))
                        return -EINVAL;
                idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
                /* least significant byte first */
                set_fpr64(&fpu->fpr[idx], 0, vs[0]);
                set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
                /* most significant byte first */
                set_fpr64(&fpu->fpr[idx], 1, vs[0]);
                set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
                break;
        case KVM_REG_MIPS_MSA_IR:
                if (!kvm_mips_guest_has_msa(&vcpu->arch))
                        return -EINVAL;
                /* Read-only */
                break;
        case KVM_REG_MIPS_MSA_CSR:
                if (!kvm_mips_guest_has_msa(&vcpu->arch))
                        return -EINVAL;
                fpu->msacsr = v;
                break;

        /* registers to be handled specially */
        default:
                return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
        }
        return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r = 0;

        if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
                return -EINVAL;
        if (cap->flags)
                return -EINVAL;
        if (cap->args[0])
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_MIPS_FPU:
                vcpu->arch.fpu_enabled = true;
                break;
        case KVM_CAP_MIPS_MSA:
                vcpu->arch.msa_enabled = true;
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}
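
/*
 * Illustrative only: userspace opts in to guest FPU support with the
 * KVM_ENABLE_CAP vcpu ioctl, provided KVM_CAP_MIPS_FPU is reported by
 * check_extension. Sketch (hypothetical vcpu_fd, error handling omitted):
 *
 *      struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
 *      ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */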

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
                         unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                if (copy_from_user(&reg, argp, sizeof(reg)))
                        return -EFAULT;
                if (ioctl == KVM_SET_ONE_REG)
                        return kvm_mips_set_reg(vcpu, &reg);
                else
                        return kvm_mips_get_reg(vcpu, &reg);
        }
        case KVM_GET_REG_LIST: {
                struct kvm_reg_list __user *user_list = argp;
                struct kvm_reg_list reg_list;
                unsigned int n;

                if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
                        return -EFAULT;
                n = reg_list.n;
                reg_list.n = kvm_mips_num_regs(vcpu);
                if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
                        return -EFAULT;
                if (n < reg_list.n)
                        return -E2BIG;
                return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
        }
        case KVM_INTERRUPT: {
                struct kvm_mips_interrupt irq;

                if (copy_from_user(&irq, argp, sizeof(irq)))
                        return -EFAULT;
                kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
                          irq.irq);

                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;

                if (copy_from_user(&cap, argp, sizeof(cap)))
                        return -EFAULT;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOIOCTLCMD;
        }
        return r;
}
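
/*
 * Illustrative only: KVM_GET_REG_LIST follows a two-call protocol. Userspace
 * first calls with n = 0 to learn the register count (the ioctl fails with
 * E2BIG but writes back the real n), then retries with a buffer large enough
 * for n register ids. Sketch (hypothetical vcpu_fd, error handling omitted):
 *
 *      struct kvm_reg_list hdr = { .n = 0 }, *list;
 *      ioctl(vcpu_fd, KVM_GET_REG_LIST, &hdr);    // fails, fills in hdr.n
 *      list = malloc(sizeof(*list) + hdr.n * sizeof(__u64));
 *      list->n = hdr.n;
 *      ioctl(vcpu_fd, KVM_GET_REG_LIST, list);    // fills list->reg[]
 */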

/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
 *
 * Steps 1-4 below provide a general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, and upon return we
 * always flush the TLB (step 4) even if the previous step failed and the
 * dirty bitmap may be corrupt. Regardless of the previous outcome the KVM
 * logging API does not preclude user space from reading the dirty log again;
 * flushing the TLB ensures writes will be marked dirty for the next log read.
 *
 * 1. Take a snapshot of the bit and clear it if needed.
 * 2. Write protect the corresponding page.
 * 3. Copy the snapshot to the userspace.
 * 4. Flush TLBs if needed.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        bool is_dirty = false;
        int r;

        mutex_lock(&kvm->slots_lock);

        r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);

        if (is_dirty) {
                slots = kvm_memslots(kvm);
                memslot = id_to_memslot(slots, log->slot);

                /* Let implementation handle TLB/GVA invalidation */
                kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
        }

        mutex_unlock(&kvm->slots_lock);
        return r;
}
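
/*
 * Illustrative only: userspace fetches the dirty bitmap for a memslot with
 * the KVM_GET_DIRTY_LOG vm ioctl, supplying a buffer with at least one bit
 * per page of the slot. Sketch (hypothetical vm_fd/slot_id/bitmap, error
 * handling omitted):
 *
 *      struct kvm_dirty_log dlog = {
 *              .slot = slot_id,
 *              .dirty_bitmap = bitmap,
 *      };
 *      ioctl(vm_fd, KVM_GET_DIRTY_LOG, &dlog);
 */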

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
        long r;

        switch (ioctl) {
        default:
                r = -ENOIOCTLCMD;
        }

        return r;
}

int kvm_arch_init(void *opaque)
{
        if (kvm_mips_callbacks) {
                kvm_err("kvm: module already exists\n");
                return -EEXIST;
        }

        return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
        kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_READONLY_MEM:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_IMMEDIATE_EXIT:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
                r = num_online_cpus();
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_MIPS_FPU:
                /* We don't handle systems with inconsistent cpu_has_fpu */
                r = !!raw_cpu_has_fpu;
                break;
        case KVM_CAP_MIPS_MSA:
                /*
                 * We don't support MSA vector partitioning yet:
                 * 1) It would require explicit support which can't be tested
                 *    yet due to lack of support in current hardware.
                 * 2) It extends the state that would need to be saved/restored
                 *    by e.g. QEMU for migration.
                 *
                 * When vector partitioning hardware becomes available, support
                 * could be added by requiring a flag when enabling the
                 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
                 * to save/restore the appropriate extra state.
                 */
                r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
                break;
        default:
                r = kvm_mips_callbacks->check_extension(kvm, ext);
                break;
        }
        return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvm_mips_pending_timer(vcpu) ||
                kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
        int i;
        struct mips_coproc *cop0;

        if (!vcpu)
                return -1;

        kvm_debug("VCPU Register Dump:\n");
        kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
        kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
                          vcpu->arch.gprs[i],
                          vcpu->arch.gprs[i + 1],
                          vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
        }
        kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
        kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

        cop0 = vcpu->arch.cop0;
        kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
                  kvm_read_c0_guest_status(cop0),
                  kvm_read_c0_guest_cause(cop0));

        kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
                vcpu->arch.gprs[i] = regs->gpr[i];
        vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
        vcpu->arch.hi = regs->hi;
        vcpu->arch.lo = regs->lo;
        vcpu->arch.pc = regs->pc;

        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
                regs->gpr[i] = vcpu->arch.gprs[i];

        regs->hi = vcpu->arch.hi;
        regs->lo = vcpu->arch.lo;
        regs->pc = vcpu->arch.pc;

        return 0;
}

static void kvm_mips_comparecount_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvm_mips_callbacks->queue_timer_int(vcpu);

        vcpu->arch.wait = 0;
        if (swq_has_sleeper(&vcpu->wq))
                swake_up(&vcpu->wq);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
        kvm_mips_comparecount_func((unsigned long)vcpu);
        return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        int err;

        err = kvm_mips_callbacks->vcpu_init(vcpu);
        if (err)
                return err;

        hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_mips_callbacks->vcpu_uninit(vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static void kvm_mips_set_c0_status(void)
{
        u32 status = read_c0_status();

        if (cpu_has_dsp)
                status |= (ST0_MX);

        write_c0_status(status);
        ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 cause = vcpu->arch.host_cp0_cause;
        u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        enum emulation_result er = EMULATE_DONE;
        u32 inst;
        int ret = RESUME_GUEST;

        vcpu->mode = OUTSIDE_GUEST_MODE;

        /* re-enable HTW before enabling interrupts */
        if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
                htw_start();

        /* Set a default exit reason */
        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /*
         * Set the appropriate status bits based on host CPU features,
         * before we hit the scheduler
         */
        kvm_mips_set_c0_status();

        local_irq_enable();

        kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
                  cause, opc, run, vcpu);
        trace_kvm_exit(vcpu, exccode);

        if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
                /*
                 * Do a privilege check: if in user mode, most of these exit
                 * conditions end up causing an exception to be delivered to
                 * the guest kernel.
                 */
                er = kvm_mips_check_privilege(cause, opc, run, vcpu);
                if (er == EMULATE_PRIV_FAIL) {
                        goto skip_emul;
                } else if (er == EMULATE_FAIL) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                        goto skip_emul;
                }
        }

        switch (exccode) {
        case EXCCODE_INT:
                kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

                ++vcpu->stat.int_exits;

                if (need_resched())
                        cond_resched();

                ret = RESUME_GUEST;
                break;

        case EXCCODE_CPU:
                kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

                ++vcpu->stat.cop_unusable_exits;
                ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
                /* XXXKYMA: Might need to return to user space */
                if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
                        ret = RESUME_HOST;
                break;

        case EXCCODE_MOD:
                ++vcpu->stat.tlbmod_exits;
                ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
                break;

        case EXCCODE_TLBS:
                kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
                          cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
                          badvaddr);

                ++vcpu->stat.tlbmiss_st_exits;
                ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
                break;

        case EXCCODE_TLBL:
                kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          cause, opc, badvaddr);

                ++vcpu->stat.tlbmiss_ld_exits;
                ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
                break;

        case EXCCODE_ADES:
                ++vcpu->stat.addrerr_st_exits;
                ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
                break;

        case EXCCODE_ADEL:
                ++vcpu->stat.addrerr_ld_exits;
                ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
                break;

        case EXCCODE_SYS:
                ++vcpu->stat.syscall_exits;
                ret = kvm_mips_callbacks->handle_syscall(vcpu);
                break;

        case EXCCODE_RI:
                ++vcpu->stat.resvd_inst_exits;
                ret = kvm_mips_callbacks->handle_res_inst(vcpu);
                break;

        case EXCCODE_BP:
                ++vcpu->stat.break_inst_exits;
                ret = kvm_mips_callbacks->handle_break(vcpu);
                break;

        case EXCCODE_TR:
                ++vcpu->stat.trap_inst_exits;
                ret = kvm_mips_callbacks->handle_trap(vcpu);
                break;

        case EXCCODE_MSAFPE:
                ++vcpu->stat.msa_fpe_exits;
                ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
                break;

        case EXCCODE_FPE:
                ++vcpu->stat.fpe_exits;
                ret = kvm_mips_callbacks->handle_fpe(vcpu);
                break;

        case EXCCODE_MSADIS:
                ++vcpu->stat.msa_disabled_exits;
                ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
                break;

        case EXCCODE_GE:
                /* defer exit accounting to handler */
                ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
                break;

        default:
                if (cause & CAUSEF_BD)
                        opc += 1;
                inst = 0;
                kvm_get_badinstr(opc, vcpu, &inst);
                kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
                        exccode, opc, inst, badvaddr,
                        kvm_read_c0_guest_status(vcpu->arch.cop0));
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;
        }

skip_emul:
        local_irq_disable();

        if (ret == RESUME_GUEST)
                kvm_vz_acquire_htimer(vcpu);

        if (er == EMULATE_DONE && !(ret & RESUME_HOST))
                kvm_mips_deliver_interrupts(vcpu, cause);

        if (!(ret & RESUME_HOST)) {
                /* Only check for signals if not already exiting to userspace */
                if (signal_pending(current)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        ret = (-EINTR << 2) | RESUME_HOST;
                        ++vcpu->stat.signal_exits;
                        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
                }
        }

        if (ret == RESUME_GUEST) {
                trace_kvm_reenter(vcpu);

                /*
                 * Make sure the read of VCPU requests in vcpu_reenter()
                 * callback is not reordered ahead of the write to vcpu->mode,
                 * or we could miss a TLB flush request while the requester sees
                 * the VCPU as outside of guest mode and not needing an IPI.
                 */
                smp_store_mb(vcpu->mode, IN_GUEST_MODE);

                kvm_mips_callbacks->vcpu_reenter(run, vcpu);

                /*
                 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
                 * is live), restore FCR31 / MSACSR.
                 *
                 * This should be before returning to the guest exception
                 * vector, as it may well cause an [MSA] FP exception if there
                 * are pending exception bits unmasked. (see
                 * kvm_mips_csr_die_notifier() for how that is handled).
                 */
                if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
                    read_c0_status() & ST0_CU1)
                        __kvm_restore_fcsr(&vcpu->arch);

                if (kvm_mips_guest_has_msa(&vcpu->arch) &&
                    read_c0_config5() & MIPS_CONF5_MSAEN)
                        __kvm_restore_msacsr(&vcpu->arch);
        }

        /* Disable HTW before returning to guest or host */
        if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
                htw_stop();

        return ret;
}

/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned int sr, cfg5;

        preempt_disable();

        sr = kvm_read_c0_guest_status(cop0);

        /*
         * If MSA state is already live, it is undefined how it interacts with
         * FR=0 FPU state, and we don't want to hit reserved instruction
         * exceptions trying to save the MSA state later when CU=1 && FR=1, so
         * play it safe and save it first.
         *
         * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
         * get called when guest CU1 is set, however we can't trust the guest
         * not to clobber the status register directly via the commpage.
         */
        if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
            vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
                kvm_lose_fpu(vcpu);

        /*
         * Enable FPU for guest
         * We set FR and FRE according to guest context
         */
        change_c0_status(ST0_CU1 | ST0_FR, sr);
        if (cpu_has_fre) {
                cfg5 = kvm_read_c0_guest_config5(cop0);
                change_c0_config5(MIPS_CONF5_FRE, cfg5);
        }
        enable_fpu_hazard();

        /* If guest FPU state not active, restore it now */
        if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
                __kvm_restore_fpu(&vcpu->arch);
                vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
        } else {
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
        }

        preempt_enable();
}

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned int sr, cfg5;

        preempt_disable();

        /*
         * Enable FPU if enabled in guest, since we're restoring FPU context
         * anyway. We set FR and FRE according to guest context.
         */
        if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
                sr = kvm_read_c0_guest_status(cop0);

                /*
                 * If FR=0 FPU state is already live, it is undefined how it
                 * interacts with MSA state, so play it safe and save it first.
                 */
                if (!(sr & ST0_FR) &&
                    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
                                KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
                        kvm_lose_fpu(vcpu);

                change_c0_status(ST0_CU1 | ST0_FR, sr);
                if (sr & ST0_CU1 && cpu_has_fre) {
                        cfg5 = kvm_read_c0_guest_config5(cop0);
                        change_c0_config5(MIPS_CONF5_FRE, cfg5);
                }
        }

        /* Enable MSA for guest */
        set_c0_config5(MIPS_CONF5_MSAEN);
        enable_fpu_hazard();

        switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
        case KVM_MIPS_AUX_FPU:
                /*
                 * Guest FPU state already loaded, only restore upper MSA state
                 */
                __kvm_restore_msa_upper(&vcpu->arch);
                vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
                break;
        case 0:
                /* Neither FPU nor MSA already active, restore full MSA state */
                __kvm_restore_msa(&vcpu->arch);
                vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
                if (kvm_mips_guest_has_fpu(&vcpu->arch))
                        vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
                              KVM_TRACE_AUX_FPU_MSA);
                break;
        default:
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
                break;
        }

        preempt_enable();
}
#endif

/* Drop FPU & MSA state without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
                disable_msa();
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
                vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
        }
        if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
                clear_c0_status(ST0_CU1 | ST0_FR);
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
                vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
        }
        preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
        /*
         * With T&E, FPU & MSA get disabled in root context (hardware) when it
         * is disabled in guest context (software), but the register state in
         * the hardware may still be in use.
         * This is why we explicitly re-enable the hardware before saving.
         */

        preempt_disable();
        if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
                if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
                        set_c0_config5(MIPS_CONF5_MSAEN);
                        enable_fpu_hazard();
                }

                __kvm_save_msa(&vcpu->arch);
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

                /* Disable MSA & FPU */
                disable_msa();
                if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
                        clear_c0_status(ST0_CU1 | ST0_FR);
                        disable_fpu_hazard();
                }
                vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
        } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
                if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
                        set_c0_status(ST0_CU1);
                        enable_fpu_hazard();
                }

                __kvm_save_fpu(&vcpu->arch);
                vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

                /* Disable FPU */
                clear_c0_status(ST0_CU1 | ST0_FR);
                disable_fpu_hazard();
        }
        preempt_enable();
}

/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
                                   unsigned long cmd, void *ptr)
{
        struct die_args *args = (struct die_args *)ptr;
        struct pt_regs *regs = args->regs;
        unsigned long pc;

        /* Only interested in FPE and MSAFPE */
        if (cmd != DIE_FP && cmd != DIE_MSAFP)
                return NOTIFY_DONE;

        /* Return immediately if guest context isn't active */
        if (!(current->flags & PF_VCPU))
                return NOTIFY_DONE;

        /* Should never get here from user mode */
        BUG_ON(user_mode(regs));

        pc = instruction_pointer(regs);
        switch (cmd) {
        case DIE_FP:
                /* match 2nd instruction in __kvm_restore_fcsr */
                if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
                        return NOTIFY_DONE;
                break;
        case DIE_MSAFP:
                /* match 2nd/3rd instruction in __kvm_restore_msacsr */
                if (!cpu_has_msa ||
                    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
                    pc > (unsigned long)&__kvm_restore_msacsr + 8)
                        return NOTIFY_DONE;
                break;
        }

        /* Move PC forward a little and continue executing */
        instruction_pointer(regs) += 4;

        return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
        .notifier_call = kvm_mips_csr_die_notify,
};

static int __init kvm_mips_init(void)
{
        int ret;

        ret = kvm_mips_entry_setup();
        if (ret)
                return ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

        if (ret)
                return ret;

        register_die_notifier(&kvm_mips_csr_die_notifier);

        return 0;
}

static void __exit kvm_mips_exit(void)
{
        kvm_exit();

        unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);