/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif
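/*
 * With external/vectored interrupts (EI/VI) each of the 64 vector entries
 * built in kvm_arch_vcpu_create() below is VECTORSPACING bytes apart, which
 * is where the 0x200 + VECTORSPACING * 64 handler size used there comes from.
 */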
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait",	  VCPU_STAT(wait_exits),	 KVM_STAT_VCPU },
	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
	{ "addrerr_st",	  VCPU_STAT(addrerr_st_exits),	 KVM_STAT_VCPU },
	{ "addrerr_ld",	  VCPU_STAT(addrerr_ld_exits),	 KVM_STAT_VCPU },
	{ "syscall",	  VCPU_STAT(syscall_exits),	 KVM_STAT_VCPU },
	{ "resvd_inst",	  VCPU_STAT(resvd_inst_exits),	 KVM_STAT_VCPU },
	{ "break_inst",	  VCPU_STAT(break_inst_exits),	 KVM_STAT_VCPU },
	{ "trap_inst",	  VCPU_STAT(trap_inst_exits),	 KVM_STAT_VCPU },
	{ "msa_fpe",	  VCPU_STAT(msa_fpe_exits),	 KVM_STAT_VCPU },
	{ "fpe",	  VCPU_STAT(fpe_exits),		 KVM_STAT_VCPU },
	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
#ifdef CONFIG_KVM_MIPS_VZ
	{ "vz_gpsi",	  VCPU_STAT(vz_gpsi_exits),	 KVM_STAT_VCPU },
	{ "vz_gsfc",	  VCPU_STAT(vz_gsfc_exits),	 KVM_STAT_VCPU },
	{ "vz_hc",	  VCPU_STAT(vz_hc_exits),	 KVM_STAT_VCPU },
	{ "vz_grr",	  VCPU_STAT(vz_grr_exits),	 KVM_STAT_VCPU },
	{ "vz_gva",	  VCPU_STAT(vz_gva_exits),	 KVM_STAT_VCPU },
	{ "vz_ghfc",	  VCPU_STAT(vz_ghfc_exits),	 KVM_STAT_VCPU },
	{ "vz_gpa",	  VCPU_STAT(vz_gpa_exits),	 KVM_STAT_VCPU },
	{ "vz_resvd",	  VCPU_STAT(vz_resvd_exits),	 KVM_STAT_VCPU },
#endif
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
	{ "halt_wakeup",  VCPU_STAT(halt_wakeup),	 KVM_STAT_VCPU },
	{NULL}
};
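/*
 * Most of these counters are incremented as kvm_mips_handle_exit() below
 * dispatches each exit cause; the halt_* ones come from the generic KVM
 * halt-polling code. The VCPU_STAT() offsets let the generic debugfs code
 * read each counter straight out of struct kvm_vcpu.
 */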
bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = 1;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = 0;
}
/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
int kvm_arch_hardware_enable(void)
{
	return kvm_mips_callbacks->hardware_enable();
}

void kvm_arch_hardware_disable(void)
{
	kvm_mips_callbacks->hardware_disable();
}
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
#ifdef CONFIG_KVM_MIPS_VZ
	case KVM_VM_MIPS_VZ:
#else
	case KVM_VM_MIPS_TE:
#endif
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	};

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
		return -ENOMEM;

	return 0;
}
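/*
 * The GPA page table allocated above is torn down in ranges by the
 * kvm_arch_flush_shadow_*() hooks below and freed wholesale by
 * kvm_mips_free_gpa_pt() when the VM is destroyed.
 */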
bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}
void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}
static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
}
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);

	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_all(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */

	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int needs_flush;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/*
	 * If dirty page logging is enabled, write protect all pages in the slot
	 * ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		/* Let implementation do the rest */
		if (needs_flush)
			kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
	}
}
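/*
 * Dump a range of generated handler code as a pseudo-assembly listing (a
 * LEAF/END block of raw .word encodings) so it can be copied out of the
 * kernel log and run through a disassembler.
 */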
static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err, size;
	void *gebase, *p, *handler, *refill_start, *refill_end;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;
	vcpu->arch.last_exec_cpu = -1;

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_uninit_cpu:
	kvm_vcpu_uninit(vcpu);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}
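/*
 * Sketch of the handler layout kvm_arch_vcpu_create() builds inside gebase,
 * derived from the offsets used above:
 *
 *   gebase + 0x000:  TLB refill handler (at +0x080 on 64-bit VZ, where the
 *                    XTLB refill vector is used)
 *   gebase + 0x180:  general exception entry
 *   gebase + 0x200:  vectored interrupt entries, VECTORSPACING apart
 *   gebase + 0x2000: common exit handler, followed by the guest entry
 *                    routine at vcpu->arch.vcpu_run
 */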
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
	kfree(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = -EINTR;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	if (run->immediate_exit)
		goto out;

	lose_fpu(1);

	local_irq_disable();
	guest_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests in vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the VCPU as outside of guest
	 * mode and not needing an IPI.
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_callbacks->vcpu_run(run, vcpu);

	trace_kvm_out(vcpu);
	guest_exit_irqoff();
	local_irq_enable();

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
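/*
 * Note the sign convention handled below: a positive irq number (2, 3 or 4)
 * queues that I/O interrupt for the target VCPU, while the negated value
 * (-2, -3 or -4) dequeues it again.
 */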
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (swait_active(&dvcpu->wq))
		swake_up(&dvcpu->wq);

	return 0;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}
static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}
static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}
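/*
 * reg->id encodes both the register and the transfer width (the
 * KVM_REG_SIZE_U32/U64/U128 field masked by KVM_REG_SIZE_MASK), which is why
 * the getter above and the setter below both dispatch on the size bits
 * before touching the userspace buffer at reg->addr.
 */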
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned int n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
	}
	case KVM_INTERRUPT: {
		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		if (copy_from_user(&cap, argp, sizeof(cap)))
			return -EFAULT;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}
	return r;
}
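/*
 * Userspace view of the one_reg interface handled above (an illustrative
 * sketch, not part of this file): fill a struct kvm_one_reg with a register
 * id and a buffer pointer, e.g.
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_PC,
 *		.addr = (__u64)&pc,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * which reaches kvm_mips_get_reg() through the KVM_GET_ONE_REG case above.
 */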
/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
 *
 * Steps 1-4 below provide general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
 * always flush the TLB (step 4) even if previous step failed and the dirty
 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
 * does not preclude user space subsequent dirty log read. Flushing TLB ensures
 * writes will be marked dirty for next log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLBs if needed.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	bool is_dirty = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);

	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		/* Let implementation handle TLB/GVA invalidation */
		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
	}

	mutex_unlock(&kvm->slots_lock);
	return r;
}
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}
int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
}
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}
static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);
}
/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int err;

	err = kvm_mips_callbacks->vcpu_init(vcpu);
	if (err)
		return err;

	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_uninit(vcpu);
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}
/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}
static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}
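/*
 * The (errcode << 2) | RESUME_* encoding documented on kvm_mips_handle_exit()
 * below is visible in its signal path, which returns
 * (-EINTR << 2) | RESUME_HOST so the caller exits to userspace with -EINTR.
 */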
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	u32 inst;
	int ret = RESUME_GUEST;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* re-enable HTW before enabling interrupts */
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
		/*
		 * Do a privilege check, if in UM most of these exit conditions
		 * end up causing an exception to be delivered to the Guest
		 * Kernel
		 */
		er = kvm_mips_check_privilege(cause, opc, run, vcpu);
		if (er == EMULATE_PRIV_FAIL) {
			goto skip_emul;
		} else if (er == EMULATE_FAIL) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
			goto skip_emul;
		}
	}

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault:  cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	}

skip_emul:
	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester sees
		 * the VCPU as outside of guest mode and not needing an IPI.
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(run, vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}

	/* Disable HTW before returning to guest or host */
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_stop();

	return ret;
}
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}
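/*
 * vcpu->arch.aux_inuse tracks which auxiliary register state is live in the
 * hardware: kvm_own_fpu() above and kvm_own_msa() below set
 * KVM_MIPS_AUX_FPU/KVM_MIPS_AUX_MSA as they restore context, while
 * kvm_drop_fpu() and kvm_lose_fpu() clear the bits when the state is
 * discarded or saved back into the VCPU.
 */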
#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU or MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif
/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}
/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_config5(MIPS_CONF5_MSAEN);
			enable_fpu_hazard();
		}

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_status(ST0_CU1);
			enable_fpu_hazard();
		}

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}
/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};
static int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);