/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>

#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#define VECTORSPACING 0x100	/* for EI/VI mode */

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait",	  VCPU_STAT(wait_exits),	 KVM_STAT_VCPU },
	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
	{ "addrerr_st",	  VCPU_STAT(addrerr_st_exits),	 KVM_STAT_VCPU },
	{ "addrerr_ld",	  VCPU_STAT(addrerr_ld_exits),	 KVM_STAT_VCPU },
	{ "syscall",	  VCPU_STAT(syscall_exits),	 KVM_STAT_VCPU },
	{ "resvd_inst",	  VCPU_STAT(resvd_inst_exits),	 KVM_STAT_VCPU },
	{ "break_inst",	  VCPU_STAT(break_inst_exits),	 KVM_STAT_VCPU },
	{ "trap_inst",	  VCPU_STAT(trap_inst_exits),	 KVM_STAT_VCPU },
	{ "msa_fpe",	  VCPU_STAT(msa_fpe_exits),	 KVM_STAT_VCPU },
	{ "fpe",	  VCPU_STAT(fpe_exits),		 KVM_STAT_VCPU },
	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
	{ "halt_wakeup",  VCPU_STAT(halt_wakeup),	 KVM_STAT_VCPU },
	{NULL}
};
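
/*
 * Each entry above is picked up by the generic KVM code and exposed as a
 * debugfs counter (e.g. under /sys/kernel/debug/kvm/); the VCPU_STAT()
 * offsets select the matching field in struct kvm_vcpu.
 */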
static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}

	return 0;
}
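
/*
 * Zeroing the cached ASIDs above forces a fresh host ASID allocation (and
 * with it invalidation of any stale guest TLB mappings) the next time this
 * vCPU runs on each CPU.
 */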
/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}
static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long wired;

	/*
	 * Add a wired entry to the TLB, it is used to map the commpage to
	 * the guest kernel
	 */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}
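
/*
 * Note: the init above runs once per CPU (via on_each_cpu() below), so
 * every CPU permanently gives up one wired TLB entry for the commpage
 * mapping (GVA 0xFFFF8000, see kvm_arch_vcpu_create()).
 */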
static void kvm_mips_init_vm_percpu(void *arg)
{
	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
	}

	return 0;
}
bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}
void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}
	kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}
static void kvm_mips_uninit_tlbs(void *arg)
{
	/* Restore wired count */
	write_c0_wired(0);

	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
	}
}
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	unsigned long npages = 0;
	int i;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Setup Guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		if (mem->slot == 0)
			npages = mem->memory_size >> PAGE_SHIFT;

		if (npages) {
			kvm->arch.guest_pmap_npages = npages;
			kvm->arch.guest_pmap =
			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

			if (!kvm->arch.guest_pmap) {
				kvm_err("Failed to allocate guest PMAP\n");
				return;
			}

			kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
				  npages, kvm->arch.guest_pmap);

			/* Now setup the page table */
			for (i = 0; i < npages; i++)
				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
		}
	}
}
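
/*
 * guest_pmap[] thus holds one entry per guest physical page, recording
 * the host pfn backing it; KVM_INVALID_PAGE marks pages that have not
 * been faulted in yet (compare the kvm_release_pfn_clean() loop in
 * kvm_mips_free_vcpus() above).
 */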
static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err, size;
	void *gebase, *p, *handler;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB Refill, EXL = 0 */
	kvm_mips_build_exception(gebase, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	local_flush_icache_range((unsigned long)gebase,
				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
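
	/*
	 * Resulting layout of the gebase area built above:
	 *   +0x0000  TLB refill exception vector (EXL = 0)
	 *   +0x0180  general exception entry point
	 *   +0x0200  vectored interrupt handlers, VECTORSPACING bytes apart
	 *   +0x2000  common exit handler, then the generated vcpu_run entry
	 */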
	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_uninit_cpu:
	kvm_vcpu_uninit(vcpu);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
	kfree(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}
/* Must be called with preemption disabled, just before entering guest */
static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int i, cpu = smp_processor_id();
	unsigned int gasid;

	/*
	 * Lazy host ASID regeneration for guest user mode.
	 * If the guest ASID has changed since the last guest usermode
	 * execution, regenerate the host ASID so as to invalidate stale TLB
	 * entries.
	 */
	if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
		if (gasid != vcpu->arch.last_user_gasid) {
			kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu,
						vcpu);
			vcpu->arch.guest_user_asid[cpu] =
				vcpu->arch.guest_user_mm.context.asid[cpu];
			for_each_possible_cpu(i)
				if (i != cpu)
					vcpu->arch.guest_user_asid[i] = 0;
			vcpu->arch.last_user_gasid = gasid;
		}
	}
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = 0;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	lose_fpu(1);

	local_irq_disable();
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	guest_enter_irqoff();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	trace_kvm_enter(vcpu);

	kvm_mips_check_asids(vcpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	/* Re-enable HTW before enabling interrupts */
	htw_start();

	guest_exit_irqoff();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (swait_active(&dvcpu->wq))
		swake_up(&dvcpu->wq);

	return 0;
}
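
/*
 * Interrupt number convention exercised above: a positive irq (2, 3, 4)
 * queues the corresponding interrupt, the matching negative value dequeues
 * it, and irq->cpu == -1 targets the calling vCPU itself.
 */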
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}
static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};

static u64 kvm_mips_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += __arch_hweight8(vcpu->arch.kscratch_enabled);
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}
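
/*
 * The magic numbers above: 48 = 32 FPR_32 registers + 16 even-indexed
 * FPR_64 registers (odd doubles only exist with a 64-bit FPU, hence the
 * extra 16 when MIPS_FPIR_F64 is set), and 32 covers the MSA vector
 * registers VEC_128(0..31). This must stay in sync with the copy loop in
 * kvm_mips_copy_reg_indices() below.
 */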
static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	unsigned int i;
	u64 index;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	for (i = 0; i < 6; ++i) {
		if (!(vcpu->arch.kscratch_enabled & BIT(i + 2)))
			continue;

		if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i],
				 sizeof(kvm_mips_get_one_regs_kscratch[i])))
			return -EFAULT;
		++indices;
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
			return -EINVAL;
		switch (idx) {
		case 2:
			v = (long)kvm_read_c0_guest_kscratch1(cop0);
			break;
		case 3:
			v = (long)kvm_read_c0_guest_kscratch2(cop0);
			break;
		case 4:
			v = (long)kvm_read_c0_guest_kscratch3(cop0);
			break;
		case 5:
			v = (long)kvm_read_c0_guest_kscratch4(cop0);
			break;
		case 6:
			v = (long)kvm_read_c0_guest_kscratch5(cop0);
			break;
		case 7:
			v = (long)kvm_read_c0_guest_kscratch6(cop0);
			break;
		}
		break;
	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}
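
/*
 * Userspace side, for illustration only (not part of this file): a VMM
 * reads one register through the size-tagged id, e.g. the program counter:
 *
 *	u64 val;
 *	struct kvm_one_reg one = {
 *		.id   = KVM_REG_MIPS_PC,
 *		.addr = (u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one);
 */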
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
			return -EINVAL;
		switch (idx) {
		case 2:
			kvm_write_c0_guest_kscratch1(cop0, v);
			break;
		case 3:
			kvm_write_c0_guest_kscratch2(cop0, v);
			break;
		case 4:
			kvm_write_c0_guest_kscratch3(cop0, v);
			break;
		case 5:
			kvm_write_c0_guest_kscratch4(cop0, v);
			break;
		case 6:
			kvm_write_c0_guest_kscratch5(cop0, v);
			break;
		case 7:
			kvm_write_c0_guest_kscratch6(cop0, v);
			break;
		}
		break;
	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned int n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
	}
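	/*
	 * The KVM_GET_REG_LIST handling above implies the usual two-call
	 * protocol: userspace first calls with a small reg_list.n to learn
	 * the real count (receiving -E2BIG), then allocates and calls again.
	 */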
	case KVM_NMI:
		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
		break;
	case KVM_INTERRUPT:
		{
			struct kvm_mips_interrupt irq;

			r = -EFAULT;
			if (copy_from_user(&irq, argp, sizeof(irq)))
				goto out;

			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
				  irq.irq);

			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
			break;
		}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

out:
	return r;
}
/* Get (and clear) the dirty memory log for a memory slot. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
			 ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}
static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
}
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* re-enable HTW before enabling interrupts */
	htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	/*
	 * Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	default:
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	}

skip_emul:
	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		kvm_mips_check_asids(vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}

	/* Disable HTW before returning to guest or host */
	htw_stop();

	return ret;
}
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}
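
/*
 * In kvm_own_fpu() above and the MSA variants below, aux_inuse tracks which
 * guest register state (KVM_MIPS_AUX_FPU, KVM_MIPS_AUX_MSA) is currently
 * live in the hardware register files: "own" restores state and sets the
 * bit, "lose" saves state and clears it, and "drop" discards it unsaved.
 */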
#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif
/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}
/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * FPU & MSA get disabled in root context (hardware) when it is disabled
	 * in guest context (software), but the register state in the hardware
	 * may still be in use. This is why we explicitly re-enable the hardware
	 * before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		set_c0_config5(MIPS_CONF5_MSAEN);
		enable_fpu_hazard();

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}
/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};
static int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);