/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define VECTORSPACING 0x100	/* for EI/VI mode */
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, wait_exits),
	STATS_DESC_COUNTER(VCPU, cache_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
	STATS_DESC_COUNTER(VCPU, tlbmod_exits),
	STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
	STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
	STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
	STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
	STATS_DESC_COUNTER(VCPU, syscall_exits),
	STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
	STATS_DESC_COUNTER(VCPU, break_inst_exits),
	STATS_DESC_COUNTER(VCPU, trap_inst_exits),
	STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
	STATS_DESC_COUNTER(VCPU, fpe_exits),
	STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
	STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
	STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
	STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
	STATS_DESC_COUNTER(VCPU, vz_hc_exits),
	STATS_DESC_COUNTER(VCPU, vz_grr_exits),
	STATS_DESC_COUNTER(VCPU, vz_gva_exits),
	STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
	STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
	STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
#ifdef CONFIG_CPU_LOONGSON64
	STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
#endif
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
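/*
 * Layout note (a summary of the generic KVM binary stats interface, not
 * anything MIPS-specific): the kvm_stats_header structures above describe
 * the file read from a VM/vCPU stats fd as a header, followed by an id
 * string of KVM_STATS_NAME_SIZE bytes, the descriptor array, and finally
 * the data block, at the id/desc/data offsets computed above.
 */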
bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = true;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = false;
}
/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
int kvm_arch_hardware_enable(void)
{
	return kvm_mips_callbacks->hardware_enable();
}

void kvm_arch_hardware_disable(void)
{
	kvm_mips_callbacks->hardware_disable();
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}
extern void kvm_init_loongson_ipi(struct kvm *kvm);

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
	case KVM_VM_MIPS_AUTO:
		break;
	case KVM_VM_MIPS_VZ:
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	}

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
		return -ENOMEM;

#ifdef CONFIG_CPU_LOONGSON64
	kvm_init_loongson_ipi(kvm);
#endif

	return 0;
}
void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_vcpu_destroy(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}
static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
}
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);
	kvm_flush_remote_tlbs(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */

	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int needs_flush;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/*
	 * If dirty page logging is enabled, write protect all pages in the slot
	 * ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		if (needs_flush)
			kvm_arch_flush_remote_tlbs_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
	}
}
static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}
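/*
 * Usage note: dump_handler() prints a dynamically generated handler as an
 * assembler-style listing via pr_debug(), so it only emits output when
 * dynamic debug (or a DEBUG build) is enabled for this file; the LEAF()/END()
 * wrappers make the dump easy to paste into an assembly file for inspection.
 */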
/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	rcuwait_wake_up(&vcpu->wait);

	return kvm_mips_count_timeout(vcpu);
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err, size;
	void *gebase, *p, *handler, *refill_start, *refill_end;
	int i;

	kvm_debug("kvm @ %p: create cpu %d at %p\n",
		  vcpu->kvm, vcpu->vcpu_id, vcpu);

	err = kvm_mips_callbacks->vcpu_init(vcpu);
	if (err)
		return err;

	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_vcpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/* Init */
	vcpu->arch.last_sched_cpu = -1;
	vcpu->arch.last_exec_cpu = -1;

	/* Initial guest state */
	err = kvm_mips_callbacks->vcpu_setup(vcpu);
	if (err)
		goto out_free_gebase;

	return 0;

out_free_gebase:
	kfree(gebase);
out_uninit_vcpu:
	kvm_mips_callbacks->vcpu_uninit(vcpu);
	return err;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);

	kvm_mips_callbacks->vcpu_uninit(vcpu);
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
	}

	if (vcpu->run->immediate_exit)
		goto out;

	lose_fpu(1);

	local_irq_disable();
	guest_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests in vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the VCPU as outside of guest
	 * mode and not needing an IPI.
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_callbacks->vcpu_run(vcpu);

	trace_kvm_out(vcpu);
	guest_exit_irqoff();
	local_irq_enable();

out:
	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
	    intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	rcuwait_wake_up(&dvcpu->wait);

	return 0;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}
static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,  KVM_REG_MIPS_R1,  KVM_REG_MIPS_R2,  KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,  KVM_REG_MIPS_R5,  KVM_REG_MIPS_R6,  KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,  KVM_REG_MIPS_R9,  KVM_REG_MIPS_R10, KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12, KVM_REG_MIPS_R13, KVM_REG_MIPS_R14, KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16, KVM_REG_MIPS_R17, KVM_REG_MIPS_R18, KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20, KVM_REG_MIPS_R21, KVM_REG_MIPS_R22, KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24, KVM_REG_MIPS_R25, KVM_REG_MIPS_R26, KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28, KVM_REG_MIPS_R29, KVM_REG_MIPS_R30, KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}
static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}
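/*
 * Note on reg->id (a summary of the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG
 * ABI, not anything introduced here): the id encodes the architecture, the
 * access size (KVM_REG_SIZE_U32/U64/U128, extracted below with
 * KVM_REG_SIZE_MASK) and the register index, which is why
 * kvm_mips_get_reg()/kvm_mips_set_reg() can range-match ids such as
 * KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31.
 */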
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
			       unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_mips_set_reg(vcpu, &reg);
		else
			r = kvm_mips_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned int n;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

	vcpu_put(vcpu);
	return r;
}
*kvm
, struct kvm_memory_slot
*memslot
)
int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	kvm_mips_callbacks->prepare_flush_shadow(kvm);
	return 1;
}
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot)
{
	kvm_flush_remote_tlbs(kvm);
}
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}
int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
}
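/*
 * Usage sketch (userspace side, assuming the standard KVM ioctl flow): a VMM
 * first probes an optional capability such as KVM_CAP_MIPS_FPU with
 * KVM_CHECK_EXTENSION, which lands here, and only then enables it per vCPU
 * with KVM_ENABLE_CAP, which lands in kvm_vcpu_ioctl_enable_cap() above and
 * sets vcpu->arch.fpu_enabled.
 */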
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
}
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	vcpu_put(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	vcpu_put(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}
static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
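/*
 * Worked example (matching the signal-pending path near the end of
 * kvm_mips_handle_exit() below): a signal exit sets
 * ret = (-EINTR << 2) | RESUME_HOST, so the low bits carry the RESUME_*
 * flags and the upper bits carry the (negative) error code.
 */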
int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	u32 inst;
	int ret = RESUME_GUEST;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	}

	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester sees
		 * the VCPU as outside of guest mode and not needing an IPI.
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}
	return ret;
}
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}
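/*
 * Ownership note (a summary of the logic in kvm_own_fpu() above and the
 * MSA/drop/lose helpers below, not new behaviour): vcpu->arch.aux_inuse
 * tracks which guest context is live in hardware. kvm_own_fpu() and
 * kvm_own_msa() restore state and set KVM_MIPS_AUX_FPU/KVM_MIPS_AUX_MSA,
 * kvm_lose_fpu() saves state and clears them, and kvm_drop_fpu() clears
 * them without saving.
 */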
#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU or MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif
/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}
/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}
/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};
static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1]  = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};

static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1]  = C_IRQ0,
	[MIPS_EXC_INT_IO_2]  = C_IRQ1,
	[MIPS_EXC_INT_IPI_1] = C_IRQ4,
};

u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;
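/*
 * kvm_irq_to_priority() below inverts these tables by matching the
 * CP0_Cause.IP bit for an interrupt number. Worked example with the default
 * table: irq 7 gives 1 << (7 + 8) == C_IRQ5, which matches
 * MIPS_EXC_INT_TIMER.
 */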
u32 kvm_irq_to_priority(u32 irq)
{
	int i;

	for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
		if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
			return i;
	}

	return MIPS_EXC_MAX;
}
static int __init kvm_mips_init(void)
{
	int ret;

	if (cpu_has_mmid) {
		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
		return -EOPNOTSUPP;
	}

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	if (boot_cpu_type() == CPU_LOONGSON64)
		kvm_priority_to_irq = kvm_loongson3_priority_to_irq;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}
static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
);