/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
#include "commpage.h"

#include "trace.h"
/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should only be called when the vcpu is in a branch delay
 * slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
                                     unsigned long instpc)
{
        unsigned int dspcontrol;
        union mips_instruction insn;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        long epc = instpc;
        long nextpc = KVM_INVALID_INST;

        if (epc & 3)
                goto unaligned;

        /* Read the instruction */
        insn.word = kvm_get_inst((u32 *) epc, vcpu);

        if (insn.word == KVM_INVALID_INST)
                return KVM_INVALID_INST;

        switch (insn.i_format.opcode) {
                /* jr and jalr are in r_format format. */
        case spec_op:
                switch (insn.r_format.func) {
                case jalr_op:
                        arch->gprs[insn.r_format.rd] = epc + 8;
                        /* Fall through */
                case jr_op:
                        nextpc = arch->gprs[insn.r_format.rs];
                        break;
                }
                break;

                /*
                 * This group contains:
                 * bltz_op, bgez_op, bltzl_op, bgezl_op,
                 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
                 */
        case bcond_op:
                switch (insn.i_format.rt) {
                case bltz_op:
                case bltzl_op:
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgez_op:
                case bgezl_op:
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bltzal_op:
                case bltzall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgezal_op:
                case bgezall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bposge32_op:
                        if (!cpu_has_dsp)
                                goto sigill;

                        dspcontrol = rddsp(0x01);

                        if (dspcontrol >= 32)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
                }
                break;

                /* These are unconditional and in j_format. */
        case jal_op:
                arch->gprs[31] = instpc + 8;
        case j_op:
                epc += 4;
                epc >>= 28;
                epc <<= 28;
                epc |= (insn.j_format.target << 2);
                nextpc = epc;
                break;

                /* These are conditional and in i_format. */
        case beq_op:
        case beql_op:
                if (arch->gprs[insn.i_format.rs] ==
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bne_op:
        case bnel_op:
                if (arch->gprs[insn.i_format.rs] !=
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case blez_op:   /* POP06 */
#ifndef CONFIG_CPU_MIPSR6
        case blezl_op:  /* removed in R6 */
#endif
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                if ((long)arch->gprs[insn.i_format.rs] <= 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bgtz_op:   /* POP07 */
#ifndef CONFIG_CPU_MIPSR6
        case bgtzl_op:  /* removed in R6 */
#endif
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                if ((long)arch->gprs[insn.i_format.rs] > 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

                /* And now the FPA/cp1 branch instructions. */
        case cop1_op:
                kvm_err("%s: unsupported cop1_op\n", __func__);
                break;

#ifdef CONFIG_CPU_MIPSR6
        /* R6 added the following compact branches with forbidden slots */
        case blezl_op:  /* POP26 */
        case bgtzl_op:  /* POP27 */
                /* only rt == 0 isn't compact branch */
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                break;
        case pop10_op:
        case pop30_op:
                /* only rs == rt == 0 is reserved, rest are compact branches */
                if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
                        goto compact_branch;
                break;
        case pop66_op:
        case pop76_op:
                /* only rs == 0 isn't compact branch */
                if (insn.i_format.rs != 0)
                        goto compact_branch;
                break;
compact_branch:
                /*
                 * If we've hit an exception on the forbidden slot, then
                 * the branch must not have been taken.
                 */
                epc += 8;
                nextpc = epc;
                break;
#else
compact_branch:
                /* Compact branches not supported before R6 */
                break;
#endif
        }

        return nextpc;

unaligned:
        kvm_err("%s: unaligned epc\n", __func__);
        return nextpc;

sigill:
        kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
        return nextpc;
}
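/*
 * Worked example of the branch arithmetic in kvm_compute_return_epc(): a
 * taken conditional branch with 16-bit signed immediate resolves to
 * epc + 4 + (simmediate << 2), i.e. relative to the delay-slot address. With
 * epc = 0x80001000 and simmediate = 0x10, the target is
 * 0x80001000 + 4 + 0x40 = 0x80001044. A not-taken branch resumes at epc + 8,
 * stepping over both the branch and its delay slot.
 */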
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
        unsigned long branch_pc;
        enum emulation_result er = EMULATE_DONE;

        if (cause & CAUSEF_BD) {
                branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
                if (branch_pc == KVM_INVALID_INST) {
                        er = EMULATE_FAIL;
                } else {
                        vcpu->arch.pc = branch_pc;
                        kvm_debug("BD update_pc(): New PC: %#lx\n",
                                  vcpu->arch.pc);
                }
        } else {
                vcpu->arch.pc += 4;
        }

        kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

        return er;
}
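/*
 * Note on update_pc(): if the emulated instruction sits in a branch delay
 * slot (CAUSEF_BD set in @cause), vcpu->arch.pc still points at the branch,
 * so the branch itself must be emulated to find the resume address; for an
 * ordinary instruction the PC simply advances by one 4-byte instruction.
 */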
/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        return  (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
                (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}
/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
        s64 now_ns, periods;
        u64 delta;

        now_ns = ktime_to_ns(now);
        delta = now_ns + vcpu->arch.count_dyn_bias;

        if (delta >= vcpu->arch.count_period) {
                /* If delta is out of safe range the bias needs adjusting */
                periods = div64_s64(now_ns, vcpu->arch.count_period);
                vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
                /* Recalculate delta with new bias */
                delta = now_ns + vcpu->arch.count_dyn_bias;
        }

        /*
         * We've ensured that:
         *   delta < count_period
         *
         * Therefore the intermediate delta*count_hz will never overflow since
         * at the boundary condition:
         *   delta = count_period
         *   delta = NSEC_PER_SEC * 2^32 / count_hz
         *   delta * count_hz = NSEC_PER_SEC * 2^32
         */
        return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
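/*
 * Worked example for kvm_mips_ktime_to_count(): with the default
 * count_hz = 100 MHz, count_period = 10^9 * 2^32 / 10^8 ns, ~42.95 s. Since
 * delta < count_period is guaranteed above, the worst-case intermediate
 * product is delta * count_hz = NSEC_PER_SEC * 2^32, ~4.3 * 10^18, which
 * comfortably fits the unsigned 64-bit range (max ~1.8 * 10^19).
 */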
/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward
 * ktime_get(), except when the master disable bit is set in count_ctl, in
 * which case it is count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
        if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
                return vcpu->arch.count_resume;

        return ktime_get();
}
/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now and handles if the
 * timer interrupt is pending and hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        ktime_t expires, threshold;
        u32 count, compare;
        int running;

        /* Calculate the biased and scaled guest CP0_Count */
        count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
        compare = kvm_read_c0_guest_compare(cop0);

        /*
         * Find whether CP0_Count has reached the closest timer interrupt. If
         * not, we shouldn't inject it.
         */
        if ((s32)(count - compare) < 0)
                return count;

        /*
         * The CP0_Count we're going to return has already reached the closest
         * timer interrupt. Quickly check if it really is a new interrupt by
         * looking at whether the interval until the hrtimer expiry time is
         * less than 1/4 of the timer period.
         */
        expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
        threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
        if (ktime_before(expires, threshold)) {
                /*
                 * Cancel it while we handle it so there's no chance of
                 * interference with the timeout handler.
                 */
                running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

                /* Nothing should be waiting on the timeout */
                kvm_mips_callbacks->queue_timer_int(vcpu);

                /*
                 * Restart the timer if it was running based on the expiry time
                 * we read, so that we don't push it back 2 periods.
                 */
                if (running) {
                        expires = ktime_add_ns(expires,
                                               vcpu->arch.count_period);
                        hrtimer_start(&vcpu->arch.comparecount_timer, expires,
                                      HRTIMER_MODE_ABS);
                }
        }

        return count;
}
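/*
 * Note on the 1/4 period threshold above: an hrtimer expiry within
 * count_period/4 of @now (~10.7 s at the default 100 MHz) is taken to belong
 * to the CP0_Count/CP0_Compare match just computed, rather than being a stale
 * expiry from a much earlier period, so the timer interrupt is queued here
 * and the hrtimer is restarted exactly one period after the expiry it had.
 */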
/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        /* If count disabled just read static copy of count */
        if (kvm_mips_count_disabled(vcpu))
                return kvm_read_c0_guest_count(cop0);

        return kvm_mips_read_count_running(vcpu, ktime_get());
}
/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
        ktime_t now;

        /* stop hrtimer before finding time */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);
        now = ktime_get();

        /* find count at this point and handle pending hrtimer */
        *count = kvm_mips_read_count_running(vcpu, now);

        return now;
}
/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
                                    ktime_t now, u32 count)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 compare;
        u64 delta;
        ktime_t expire;

        /* Calculate timeout (wrap 0 to 2^32) */
        compare = kvm_read_c0_guest_compare(cop0);
        delta = (u64)(u32)(compare - count - 1) + 1;
        delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
        expire = ktime_add_ns(now, delta);

        /* Update hrtimer to use new timeout */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);
        hrtimer_start(&vcpu->arch.comparecount_timer, expire,
                      HRTIMER_MODE_ABS);
}
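/*
 * Example of the wrap handling above: the u32 expression
 * (compare - count - 1) + 1 yields the number of ticks until the next
 * CP0_Count == CP0_Compare match, mapping a difference of 0 to a full
 * 2^32-tick period. With compare == count the delta is 0xffffffff + 1 = 2^32
 * ticks (~42.95 s at 100 MHz), while compare == count + 1 gives a single
 * tick (10 ns at 100 MHz).
 */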
/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        ktime_t now;

        /* Calculate bias */
        now = kvm_mips_count_time(vcpu);
        vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

        if (kvm_mips_count_disabled(vcpu))
                /* The timer's disabled, adjust the static count */
                kvm_write_c0_guest_count(cop0, count);
        else
                /* Update timeout */
                kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and
 * set it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
        /* 100 MHz */
        vcpu->arch.count_hz = 100*1000*1000;
        vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
                                          vcpu->arch.count_hz);
        vcpu->arch.count_dyn_bias = 0;

        /* Starting at count 0 */
        kvm_mips_write_count(vcpu, 0);
}
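/*
 * With the 100 MHz default above, vcpu->arch.count_period works out to
 * (10^9 << 32) / 10^8 = 42949672960 ns: one full 32-bit CP0_Count wrap
 * takes ~42.95 seconds of monotonic time.
 */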
/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int dc;
        ktime_t now;
        u32 count;

        /* ensure the frequency is in a sensible range... */
        if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
                return -EINVAL;
        /* ... and has actually changed */
        if (vcpu->arch.count_hz == count_hz)
                return 0;

        /* Safely freeze timer so we can keep it continuous */
        dc = kvm_mips_count_disabled(vcpu);
        if (dc) {
                now = kvm_mips_count_time(vcpu);
                count = kvm_read_c0_guest_count(cop0);
        } else {
                now = kvm_mips_freeze_hrtimer(vcpu, &count);
        }

        /* Update the frequency */
        vcpu->arch.count_hz = count_hz;
        vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
        vcpu->arch.count_dyn_bias = 0;

        /* Calculate adjusted bias so dynamic count is unchanged */
        vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

        /* Update and resume hrtimer */
        if (!dc)
                kvm_mips_resume_hrtimer(vcpu, now, count);
        return 0;
}
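/*
 * Continuity note for kvm_mips_set_count_hz(): the count at the frozen time
 * is re-read, the frequency and period are replaced, and count_bias is
 * recomputed so that count_bias + kvm_mips_ktime_to_count(vcpu, now) yields
 * exactly the same CP0_Count value at @now. The guest observes only a change
 * in the rate at which CP0_Count advances, never a jump in its value.
 */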
/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int dc;
        u32 old_compare = kvm_read_c0_guest_compare(cop0);
        ktime_t now;
        u32 count;

        /* if unchanged, must just be an ack */
        if (old_compare == compare) {
                if (!ack)
                        return;
                kvm_mips_callbacks->dequeue_timer_int(vcpu);
                kvm_write_c0_guest_compare(cop0, compare);
                return;
        }

        /* freeze_hrtimer() takes care of timer interrupts <= count */
        dc = kvm_mips_count_disabled(vcpu);
        if (!dc)
                now = kvm_mips_freeze_hrtimer(vcpu, &count);

        if (ack)
                kvm_mips_callbacks->dequeue_timer_int(vcpu);

        kvm_write_c0_guest_compare(cop0, compare);

        /* resume_hrtimer() takes care of timer interrupts > count */
        if (!dc)
                kvm_mips_resume_hrtimer(vcpu, now, count);
}
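/*
 * The @ack distinction above matters for the two kinds of caller: the MTC0
 * emulation in kvm_mips_emulate_CP0() below passes ack=true, since a guest
 * CP0_Compare write architecturally acknowledges the timer interrupt, while
 * callers restoring saved register state are expected to pass ack=false so
 * that an interrupt pending at save time is preserved.
 */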
/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 count;
        ktime_t now;

        /* Stop hrtimer */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);

        /* Set the static count from the dynamic count, handling pending TI */
        now = ktime_get();
        count = kvm_mips_read_count_running(vcpu, now);
        kvm_write_c0_guest_count(cop0, count);

        return now;
}
/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
        if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
                kvm_mips_count_disable(vcpu);
}
/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 count;

        kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

        /*
         * Set the dynamic count to match the static count.
         * This starts the hrtimer if count_ctl.DC allows it.
         * Otherwise it conveniently updates the biases.
         */
        count = kvm_read_c0_guest_count(cop0);
        kvm_mips_write_count(vcpu, count);
}
/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        s64 changed = count_ctl ^ vcpu->arch.count_ctl;
        s64 delta;
        ktime_t expire, now;
        u32 count, compare;

        /* Only allow defined bits to be changed */
        if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
                return -EINVAL;

        /* Apply new value */
        vcpu->arch.count_ctl = count_ctl;

        /* Master CP0_Count disable */
        if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
                /* Is CP0_Cause.DC already disabling CP0_Count? */
                if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
                        if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
                                /* Just record the current time */
                                vcpu->arch.count_resume = ktime_get();
                } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
                        /* disable timer and record current time */
                        vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
                } else {
                        /*
                         * Calculate timeout relative to static count at resume
                         * time (wrap 0 to 2^32).
                         */
                        count = kvm_read_c0_guest_count(cop0);
                        compare = kvm_read_c0_guest_compare(cop0);
                        delta = (u64)(u32)(compare - count - 1) + 1;
                        delta = div_u64(delta * NSEC_PER_SEC,
                                        vcpu->arch.count_hz);
                        expire = ktime_add_ns(vcpu->arch.count_resume, delta);

                        /* Handle pending interrupt */
                        now = ktime_get();
                        if (ktime_compare(now, expire) >= 0)
                                /* Nothing should be waiting on the timeout */
                                kvm_mips_callbacks->queue_timer_int(vcpu);

                        /* Resume hrtimer without changing bias */
                        count = kvm_mips_read_count_running(vcpu, now);
                        kvm_mips_resume_hrtimer(vcpu, now, count);
                }
        }

        return 0;
}
/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
        /*
         * It doesn't make sense for the resume time to be in the future, as it
         * would be possible for the next interrupt to be more than a full
         * period in the future.
         */
        if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
                return -EINVAL;

        vcpu->arch.count_resume = ns_to_ktime(count_resume);
        return 0;
}
/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
        /* Add the Count period to the current expiry time */
        hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
                               vcpu->arch.count_period);
        return HRTIMER_RESTART;
}
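/*
 * Returning HRTIMER_RESTART with the expiry pushed forward by exactly one
 * count_period keeps the emulated timer periodic: the next expiry again
 * corresponds to CP0_Count == CP0_Compare, one full 2^32-tick wrap later.
 */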
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;

        if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
                kvm_clear_c0_guest_status(cop0, ST0_ERL);
                vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
        } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
                kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
                          kvm_read_c0_guest_epc(cop0));
                kvm_clear_c0_guest_status(cop0, ST0_EXL);
                vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
        } else {
                kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
                        vcpu->arch.pc);
                er = EMULATE_FAIL;
        }

        return er;
}
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
        kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
                  vcpu->arch.pending_exceptions);

        ++vcpu->stat.wait_exits;
        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
        if (!vcpu->arch.pending_exceptions) {
                vcpu->arch.wait = 1;
                kvm_vcpu_block(vcpu);

                /*
                 * If we are runnable, then definitely go off to user space to
                 * check if any I/O interrupts are pending.
                 */
                if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                }
        }

        return EMULATE_DONE;
}
/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
 * we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long pc = vcpu->arch.pc;

        kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
        return EMULATE_FAIL;
}
/**
 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
 * @vcpu:	VCPU with changed mappings.
 * @tlb:	TLB entry being removed.
 *
 * This is called to indicate a single change in guest MMU mappings, so that we
 * can arrange TLB flushes on this and other CPUs.
 */
static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
                                          struct kvm_mips_tlb *tlb)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        int cpu, i;
        bool user;

        /* No need to flush for entries which are already invalid */
        if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
                return;

        /* User address space doesn't need flushing for KSeg2/3 changes */
        user = tlb->tlb_hi < KVM_GUEST_KSEG0;

        preempt_disable();

        /*
         * Probe the shadow host TLB for the entry being overwritten, if one
         * matches, invalidate it
         */
        kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);

        /* Invalidate the whole ASID on other CPUs */
        cpu = smp_processor_id();
        for_each_possible_cpu(i) {
                if (i == cpu)
                        continue;
                if (user)
                        cpu_context(i, user_mm) = 0;
                cpu_context(i, kern_mm) = 0;
        }

        preempt_enable();
}
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int index = kvm_read_c0_guest_index(cop0);
        struct kvm_mips_tlb *tlb = NULL;
        unsigned long pc = vcpu->arch.pc;

        if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
                kvm_debug("%s: illegal index: %d\n", __func__, index);
                kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                          pc, index, kvm_read_c0_guest_entryhi(cop0),
                          kvm_read_c0_guest_entrylo0(cop0),
                          kvm_read_c0_guest_entrylo1(cop0),
                          kvm_read_c0_guest_pagemask(cop0));
                index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
        }

        tlb = &vcpu->arch.guest_tlb[index];

        kvm_mips_invalidate_guest_tlb(vcpu, tlb);

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                  pc, index, kvm_read_c0_guest_entryhi(cop0),
                  kvm_read_c0_guest_entrylo0(cop0),
                  kvm_read_c0_guest_entrylo1(cop0),
                  kvm_read_c0_guest_pagemask(cop0));

        return EMULATE_DONE;
}
/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb *tlb = NULL;
        unsigned long pc = vcpu->arch.pc;
        int index;

        get_random_bytes(&index, sizeof(index));
        index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

        tlb = &vcpu->arch.guest_tlb[index];

        kvm_mips_invalidate_guest_tlb(vcpu, tlb);

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
                  pc, index, kvm_read_c0_guest_entryhi(cop0),
                  kvm_read_c0_guest_entrylo0(cop0),
                  kvm_read_c0_guest_entrylo1(cop0));

        return EMULATE_DONE;
}
enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        long entryhi = kvm_read_c0_guest_entryhi(cop0);
        unsigned long pc = vcpu->arch.pc;
        int index = -1;

        index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

        kvm_write_c0_guest_index(cop0, index);

        kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
                  index);

        return EMULATE_DONE;
}
/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1.
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = 0;

        /* Permit FPU to be present if FPU is supported */
        if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
                mask |= MIPS_CONF1_FP;

        return mask;
}
/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3.
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
        /* Config4 and ULRI are optional */
        unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;

        /* Permit MSA to be present if MSA is supported */
        if (kvm_mips_guest_can_have_msa(&vcpu->arch))
                mask |= MIPS_CONF3_MSA;

        return mask;
}
/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4.
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
        /* Config5 is optional */
        unsigned int mask = MIPS_CONF_M;

        /* KScrExist */
        mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;

        return mask;
}
/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5.
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = 0;

        /* Permit MSAEn changes if MSA supported and enabled */
        if (kvm_mips_guest_has_msa(&vcpu->arch))
                mask |= MIPS_CONF5_MSAEN;

        /*
         * Permit guest FPU mode changes if FPU is enabled and the relevant
         * feature exists according to FIR register.
         */
        if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
                if (cpu_has_fre)
                        mask |= MIPS_CONF5_FRE;
                /* We don't support UFR or UFE */
        }

        return mask;
}
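/*
 * The wrmask functions above are applied read-modify-write style; the
 * Config5 MTC0 handler in kvm_mips_emulate_CP0() below does:
 *
 *	wrmask = kvm_mips_config5_wrmask(vcpu);
 *	change = (val ^ old_val) & wrmask;
 *	val = old_val ^ change;
 *
 * so bits outside the mask silently keep their old values.
 */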
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                           u32 *opc, u32 cause,
                                           struct kvm_run *run,
                                           struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        enum emulation_result er = EMULATE_DONE;
        u32 rt, rd, sel;
        unsigned long curr_pc;
        int cpu, i;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        if (inst.co_format.co) {
                switch (inst.co_format.func) {
                case tlbr_op:   /*  Read indexed TLB entry  */
                        er = kvm_mips_emul_tlbr(vcpu);
                        break;
                case tlbwi_op:  /*  Write indexed  */
                        er = kvm_mips_emul_tlbwi(vcpu);
                        break;
                case tlbwr_op:  /*  Write random  */
                        er = kvm_mips_emul_tlbwr(vcpu);
                        break;
                case tlbp_op:   /* TLB Probe */
                        er = kvm_mips_emul_tlbp(vcpu);
                        break;
                case rfe_op:
                        kvm_err("!!!COP0_RFE!!!\n");
                        break;
                case eret_op:
                        er = kvm_mips_emul_eret(vcpu);
                        goto dont_update_pc;
                case wait_op:
                        er = kvm_mips_emul_wait(vcpu);
                        break;
                }
        } else {
                rt = inst.c0r_format.rt;
                rd = inst.c0r_format.rd;
                sel = inst.c0r_format.sel;

                switch (inst.c0r_format.rs) {
                case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        /* Get reg */
                        if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                                vcpu->arch.gprs[rt] =
                                        (s32)kvm_mips_read_count(vcpu);
                        } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
                                vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
                        } else {
                                vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
                        }

                        trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);
                        break;

                case dmfc_op:
                        vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

                        trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);
                        break;

                case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);

                        if ((rd == MIPS_CP0_TLB_INDEX)
                            && (vcpu->arch.gprs[rt] >=
                                KVM_MIPS_GUEST_TLB_SIZE)) {
                                kvm_err("Invalid TLB Index: %ld",
                                        vcpu->arch.gprs[rt]);
                                er = EMULATE_FAIL;
                                break;
                        }
#define C0_EBASE_CORE_MASK 0xff
                        if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
                                /* Preserve CORE number */
                                kvm_change_c0_guest_ebase(cop0,
                                                          ~(C0_EBASE_CORE_MASK),
                                                          vcpu->arch.gprs[rt]);
                                kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
                                        kvm_read_c0_guest_ebase(cop0));
                        } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
                                u32 nasid =
                                        vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
                                if (((kvm_read_c0_guest_entryhi(cop0) &
                                      KVM_ENTRYHI_ASID) != nasid)) {
                                        trace_kvm_asid_change(vcpu,
                                                kvm_read_c0_guest_entryhi(cop0)
                                                        & KVM_ENTRYHI_ASID,
                                                nasid);

                                        /*
                                         * Flush entries from the GVA page
                                         * tables.
                                         * Guest user page table will get
                                         * flushed lazily on re-entry to guest
                                         * user if the guest ASID actually
                                         * changes.
                                         */
                                        kvm_mips_flush_gva_pt(kern_mm->pgd,
                                                              KMF_KERN);

                                        /*
                                         * Regenerate/invalidate kernel MMU
                                         * context.
                                         * The user MMU context will be
                                         * regenerated lazily on re-entry to
                                         * guest user if the guest ASID actually
                                         * changes.
                                         */
                                        preempt_disable();
                                        cpu = smp_processor_id();
                                        kvm_get_new_mmu_context(kern_mm,
                                                                cpu, vcpu);
                                        for_each_possible_cpu(i)
                                                if (i != cpu)
                                                        cpu_context(i, kern_mm) = 0;
                                        preempt_enable();
                                }
                                kvm_write_c0_guest_entryhi(cop0,
                                                           vcpu->arch.gprs[rt]);
                        }
                        /* Are we writing to COUNT */
                        else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                                kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
                                goto done;
                        } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
                                /* If we are writing to COMPARE */
                                /* Clear pending timer interrupt, if any */
                                kvm_mips_write_compare(vcpu,
                                                       vcpu->arch.gprs[rt],
                                                       true);
                        } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
                                unsigned int old_val, val, change;

                                old_val = kvm_read_c0_guest_status(cop0);
                                val = vcpu->arch.gprs[rt];
                                change = val ^ old_val;

                                /* Make sure that the NMI bit is never set */
                                val &= ~ST0_NMI;

                                /*
                                 * Don't allow CU1 or FR to be set unless FPU
                                 * capability enabled and exists in guest
                                 * configuration.
                                 */
                                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                                        val &= ~(ST0_CU1 | ST0_FR);

                                /*
                                 * Also don't allow FR to be set if host doesn't
                                 * support it.
                                 */
                                if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
                                        val &= ~ST0_FR;

                                /* Handle changes in FPU mode */
                                preempt_disable();

                                /*
                                 * FPU and Vector register state is made
                                 * UNPREDICTABLE by a change of FR, so don't
                                 * even bother saving it.
                                 */
                                if (change & ST0_FR)
                                        kvm_drop_fpu(vcpu);

                                /*
                                 * If MSA state is already live, it is undefined
                                 * how it interacts with FR=0 FPU state, and we
                                 * don't want to hit reserved instruction
                                 * exceptions trying to save the MSA state later
                                 * when CU=1 && FR=1, so play it safe and save
                                 * it first.
                                 */
                                if (change & ST0_CU1 && !(val & ST0_FR) &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
                                        kvm_lose_fpu(vcpu);

                                /*
                                 * Propagate CU1 (FPU enable) changes
                                 * immediately if the FPU context is already
                                 * loaded. When disabling we leave the context
                                 * loaded so it can be quickly enabled again in
                                 * the near future.
                                 */
                                if (change & ST0_CU1 &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
                                        change_c0_status(ST0_CU1, val);

                                preempt_enable();

                                kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                /*
                                 * If FPU present, we need CU1/FR bits to take
                                 * effect fairly soon.
                                 */
                                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                                        kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
                        } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
                                unsigned int old_val, val, change, wrmask;

                                old_val = kvm_read_c0_guest_config5(cop0);
                                val = vcpu->arch.gprs[rt];

                                /* Only a few bits are writable in Config5 */
                                wrmask = kvm_mips_config5_wrmask(vcpu);
                                change = (val ^ old_val) & wrmask;
                                val = old_val ^ change;

                                /* Handle changes in FPU/MSA modes */
                                preempt_disable();

                                /*
                                 * Propagate FRE changes immediately if the FPU
                                 * context is already loaded.
                                 */
                                if (change & MIPS_CONF5_FRE &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
                                        change_c0_config5(MIPS_CONF5_FRE, val);

                                /*
                                 * Propagate MSAEn changes immediately if the
                                 * MSA context is already loaded. When disabling
                                 * we leave the context loaded so it can be
                                 * quickly enabled again in the near future.
                                 */
                                if (change & MIPS_CONF5_MSAEN &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
                                        change_c0_config5(MIPS_CONF5_MSAEN,
                                                          val);

                                preempt_enable();

                                kvm_write_c0_guest_config5(cop0, val);
                        } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
                                u32 old_cause, new_cause;

                                old_cause = kvm_read_c0_guest_cause(cop0);
                                new_cause = vcpu->arch.gprs[rt];
                                /* Update R/W bits */
                                kvm_change_c0_guest_cause(cop0, 0x08800300,
                                                          new_cause);
                                /* DC bit enabling/disabling timer? */
                                if ((old_cause ^ new_cause) & CAUSEF_DC) {
                                        if (new_cause & CAUSEF_DC)
                                                kvm_mips_count_disable_cause(vcpu);
                                        else
                                                kvm_mips_count_enable_cause(vcpu);
                                }
                        } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
                                u32 mask = MIPS_HWRENA_CPUNUM |
                                           MIPS_HWRENA_SYNCISTEP |
                                           MIPS_HWRENA_CC |
                                           MIPS_HWRENA_CCRES;

                                if (kvm_read_c0_guest_config3(cop0) &
                                    MIPS_CONF3_ULRI)
                                        mask |= MIPS_HWRENA_ULR;
                                cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
                        } else {
                                cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
                        }
                        break;

                case dmtc_op:
                        kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
                                vcpu->arch.pc, rt, rd, sel);
                        trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);
                        er = EMULATE_FAIL;
                        break;

                case mfmc0_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
                        if (rt != 0)
                                vcpu->arch.gprs[rt] =
                                        kvm_read_c0_guest_status(cop0);
                        /* EI */
                        if (inst.mfmc0_format.sc) {
                                kvm_debug("[%#lx] mfmc0_op: EI\n",
                                          vcpu->arch.pc);
                                kvm_set_c0_guest_status(cop0, ST0_IE);
                        } else {
                                kvm_debug("[%#lx] mfmc0_op: DI\n",
                                          vcpu->arch.pc);
                                kvm_clear_c0_guest_status(cop0, ST0_IE);
                        }

                        break;

                case wrpgpr_op:
                        {
                                u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
                                u32 pss =
                                        (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
                                /*
                                 * We don't support any shadow register sets, so
                                 * SRSCtl[PSS] == SRSCtl[CSS] = 0
                                 */
                                if (css || pss) {
                                        er = EMULATE_FAIL;
                                        break;
                                }

                                kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
                                          vcpu->arch.gprs[rt]);
                                vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
                        }
                        break;
                default:
                        kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
                                vcpu->arch.pc, inst.c0r_format.rs);
                        er = EMULATE_FAIL;
                        break;
                }
        }

done:
        /* Rollback PC only if emulation was unsuccessful */
        if (er == EMULATE_FAIL)
                vcpu->arch.pc = curr_pc;

dont_update_pc:
        /*
         * This is for special instructions whose emulation
         * updates the PC, so do not overwrite the PC under
         * any circumstances
         */

        return er;
}
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                                             u32 cause,
                                             struct kvm_run *run,
                                             struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DO_MMIO;
        u32 rt;
        u32 bytes;
        void *data = run->mmio.data;
        unsigned long curr_pc;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        rt = inst.i_format.rt;

        switch (inst.i_format.opcode) {
        case sb_op:
                bytes = 1;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__,
                                run->mmio.len);
                }
                run->mmio.phys_addr =
                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                   host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.len = bytes;
                run->mmio.is_write = 1;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 1;
                *(u8 *) data = vcpu->arch.gprs[rt];
                kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
                          vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
                          *(u8 *) data);

                break;

        case sw_op:
                bytes = 4;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__,
                                run->mmio.len);
                }
                run->mmio.phys_addr =
                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                   host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 1;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 1;
                *(u32 *) data = vcpu->arch.gprs[rt];

                kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
                          vcpu->arch.gprs[rt], *(u32 *) data);
                break;

        case sh_op:
                bytes = 2;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__,
                                run->mmio.len);
                }
                run->mmio.phys_addr =
                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                   host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 1;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 1;
                *(u16 *) data = vcpu->arch.gprs[rt];

                kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
                          vcpu->arch.gprs[rt], *(u32 *) data);
                break;

        default:
                kvm_err("Store not yet supported (inst=0x%08x)\n",
                        inst.word);
                er = EMULATE_FAIL;
                break;
        }

        /* Rollback PC if emulation was unsuccessful */
        if (er == EMULATE_FAIL)
                vcpu->arch.pc = curr_pc;

        return er;
}
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                                            u32 cause, struct kvm_run *run,
                                            struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DO_MMIO;
        unsigned long curr_pc;
        u32 op, rt;
        u32 bytes;

        rt = inst.i_format.rt;
        op = inst.i_format.opcode;

        /*
         * Find the resume PC now while we have safe and easy access to the
         * prior branch instruction, and save it for
         * kvm_mips_complete_mmio_load() to restore later.
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;
        vcpu->arch.io_pc = vcpu->arch.pc;
        vcpu->arch.pc = curr_pc;

        vcpu->arch.io_gpr = rt;

        switch (op) {
        case lw_op:
                bytes = 4;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__,
                                run->mmio.len);
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.phys_addr =
                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                   host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 0;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 0;
                break;

        case lh_op:
        case lhu_op:
                bytes = 2;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__,
                                run->mmio.len);
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.phys_addr =
                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                   host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 0;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 0;

                if (op == lh_op)
                        vcpu->mmio_needed = 2;
                else
                        vcpu->mmio_needed = 1;

                break;

        case lbu_op:
        case lb_op:
                bytes = 1;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__,
                                run->mmio.len);
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.phys_addr =
                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                   host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 0;
                vcpu->mmio_is_write = 0;

                if (op == lb_op)
                        vcpu->mmio_needed = 2;
                else
                        vcpu->mmio_needed = 1;

                break;

        default:
                kvm_err("Load not yet supported (inst=0x%08x)\n",
                        inst.word);
                er = EMULATE_FAIL;
                break;
        }

        return er;
}
enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
                                             u32 *opc, u32 cause,
                                             struct kvm_run *run,
                                             struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        u32 cache, op_inst, op, base;
        s16 offset;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        unsigned long va;
        unsigned long curr_pc;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        base = inst.i_format.rs;
        op_inst = inst.i_format.rt;
        if (cpu_has_mips_r6)
                offset = inst.spec3_format.simmediate;
        else
                offset = inst.i_format.simmediate;
        cache = op_inst & CacheOp_Cache;
        op = op_inst & CacheOp_Op;

        va = arch->gprs[base] + offset;

        kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
                  cache, op, base, arch->gprs[base], offset);

        /*
         * Treat INDEX_INV as a nop, basically issued by Linux on startup to
         * invalidate the caches entirely by stepping through all the
         * ways/indexes
         */
        if (op == Index_Writeback_Inv) {
                kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
                          vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
                          arch->gprs[base], offset);

                if (cache == Cache_D)
                        r4k_blast_dcache();
                else if (cache == Cache_I)
                        r4k_blast_icache();
                else {
                        kvm_err("%s: unsupported CACHE INDEX operation\n",
                                __func__);
                        return EMULATE_FAIL;
                }

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
                goto done;
        }

        preempt_disable();
        if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
                if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
                    kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
                        kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
                                __func__, va, vcpu, read_c0_entryhi());
                        er = EMULATE_FAIL;
                        preempt_enable();
                        goto done;
                }
        } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
                   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
                int index;

                /* If an entry already exists then skip */
                if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
                        goto skip_fault;

                /*
                 * If address not in the guest TLB, then give the guest a fault,
                 * the resulting handler will do the right thing
                 */
                index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
                                                  (kvm_read_c0_guest_entryhi
                                                   (cop0) & KVM_ENTRYHI_ASID));

                if (index < 0) {
                        vcpu->arch.host_cp0_badvaddr = va;
                        vcpu->arch.pc = curr_pc;
                        er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
                                                         vcpu);
                        preempt_enable();
                        goto dont_update_pc;
                } else {
                        struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
                        /*
                         * Check if the entry is valid, if not then setup a TLB
                         * invalid exception to the guest
                         */
                        if (!TLB_IS_VALID(*tlb, va)) {
                                vcpu->arch.host_cp0_badvaddr = va;
                                vcpu->arch.pc = curr_pc;
                                er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
                                                                run, vcpu);
                                preempt_enable();
                                goto dont_update_pc;
                        }
                        /*
                         * We fault an entry from the guest tlb to the
                         * shadow host TLB
                         */
                        if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
                                kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
                                        __func__, va, index, vcpu,
                                        read_c0_entryhi());
                                er = EMULATE_FAIL;
                                preempt_enable();
                                goto done;
                        }
                }
        } else {
                kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
                        cache, op, base, arch->gprs[base], offset);
                er = EMULATE_FAIL;
                preempt_enable();
                goto done;

        }

skip_fault:
        /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
        if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
                flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                /*
                 * Replace the CACHE instruction, with a SYNCI, not the same,
                 * but avoids a trap
                 */
                kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
        } else if (op_inst == Hit_Invalidate_I) {
                flush_dcache_line(va);
                flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                /* Replace the CACHE instruction, with a SYNCI */
                kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
        } else {
                kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
                        cache, op, base, arch->gprs[base], offset);
                er = EMULATE_FAIL;
        }

        preempt_enable();
done:
        /* Rollback PC only if emulation was unsuccessful */
        if (er == EMULATE_FAIL)
                vcpu->arch.pc = curr_pc;

dont_update_pc:
        /*
         * This is for exceptions whose emulation updates the PC, so do not
         * overwrite the PC under any circumstances
         */

        return er;
}
enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
                                            struct kvm_run *run,
                                            struct kvm_vcpu *vcpu)
{
        union mips_instruction inst;
        enum emulation_result er = EMULATE_DONE;

        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;

        inst.word = kvm_get_inst(opc, vcpu);

        switch (inst.r_format.opcode) {
        case cop0_op:
                er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
                break;

        case sb_op:
        case sh_op:
        case sw_op:
                er = kvm_mips_emulate_store(inst, cause, run, vcpu);
                break;

        case lb_op:
        case lbu_op:
        case lhu_op:
        case lh_op:
        case lw_op:
                er = kvm_mips_emulate_load(inst, cause, run, vcpu);
                break;

#ifndef CONFIG_CPU_MIPSR6
        case cache_op:
                ++vcpu->stat.cache_exits;
                trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
                er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
                break;
#else
        case spec3_op:
                switch (inst.spec3_format.func) {
                case cache6_op:
                        ++vcpu->stat.cache_exits;
                        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
                        er = kvm_mips_emulate_cache(inst, opc, cause, run,
                                                    vcpu);
                        break;
                default:
                        goto unknown;
                }
                break;
unknown:
#endif

        default:
                kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
                        inst.word);
                kvm_arch_vcpu_dump_regs(vcpu);
                er = EMULATE_FAIL;
                break;
        }

        return er;
}
enum emulation_result kvm_mips_emulate_syscall(u32 cause,
                                               u32 *opc,
                                               struct kvm_run *run,
                                               struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

                kvm_change_c0_guest_cause(cop0, (0xff),
                                          (EXCCODE_SYS << CAUSEB_EXCCODE));

                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;

        } else {
                kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
                er = EMULATE_FAIL;
        }

        return er;
}
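/*
 * kvm_mips_emulate_syscall() above is the template for the exception
 * injectors that follow: save the faulting PC into guest CP0_EPC, set
 * Status.EXL, mirror the branch-delay state into Cause.BD, write the ExcCode
 * field of Cause, and redirect the PC to the guest's exception vector
 * (KVM_GUEST_KSEG0 + 0x180, or + 0x0 for a TLB refill taken with EXL clear).
 */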
enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
                                                  u32 *opc,
                                                  struct kvm_run *run,
                                                  struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                        (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
                          arch->pc);

                /* set pc to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x0;

        } else {
                kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
                          arch->pc);

                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        }

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (EXCCODE_TLBL << CAUSEB_EXCCODE));

        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
        /* XXXKYMA: is the context register used by linux??? */
        kvm_write_c0_guest_entryhi(cop0, entryhi);
        /* Blow away the shadow host TLBs */
        kvm_mips_flush_host_tlb(1);

        return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
                                                 u32 *opc,
                                                 struct kvm_run *run,
                                                 struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        unsigned long entryhi =
                (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
                          arch->pc);

                /* set pc to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;

        } else {
                kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
                          arch->pc);
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        }

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (EXCCODE_TLBL << CAUSEB_EXCCODE));

        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
        /* XXXKYMA: is the context register used by linux??? */
        kvm_write_c0_guest_entryhi(cop0, entryhi);
        /* Blow away the shadow host TLBs */
        kvm_mips_flush_host_tlb(1);

        return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
                                                  u32 *opc,
                                                  struct kvm_run *run,
                                                  struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                        (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
                          arch->pc);

                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x0;

        } else {
                kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
                          arch->pc);
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        }

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (EXCCODE_TLBS << CAUSEB_EXCCODE));

        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
        /* XXXKYMA: is the context register used by linux??? */
        kvm_write_c0_guest_entryhi(cop0, entryhi);
        /* Blow away the shadow host TLBs */
        kvm_mips_flush_host_tlb(1);

        return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
                                                 u32 *opc,
                                                 struct kvm_run *run,
                                                 struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                        (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
                          arch->pc);

                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        } else {
                kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
                          arch->pc);
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        }

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (EXCCODE_TLBS << CAUSEB_EXCCODE));

        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
        /* XXXKYMA: is the context register used by linux??? */
        kvm_write_c0_guest_entryhi(cop0, entryhi);
        /* Blow away the shadow host TLBs */
        kvm_mips_flush_host_tlb(1);

        return EMULATE_DONE;
}
/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
                                             struct kvm_run *run,
                                             struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;
#ifdef DEBUG
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                        (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
        bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);
        int index;

        /* If address not in the guest TLB, then we are in trouble */
        index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
        if (index < 0) {
                /* XXXKYMA Invalidate and retry */
                kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr,
                                      !kernel, kernel);
                kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
                        __func__, entryhi);
                kvm_mips_dump_guest_tlbs(vcpu);
                kvm_mips_dump_host_tlbs();
                return EMULATE_FAIL;
        }
#endif

        er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
        return er;
}
enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
                                              u32 *opc,
                                              struct kvm_run *run,
                                              struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                        (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
        struct kvm_vcpu_arch *arch = &vcpu->arch;

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
                          arch->pc);

                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        } else {
                kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
                          arch->pc);
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        }

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (EXCCODE_MOD << CAUSEB_EXCCODE));

        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
        /* XXXKYMA: is the context register used by linux??? */
        kvm_write_c0_guest_entryhi(cop0, entryhi);
        /* Blow away the shadow host TLBs */
        kvm_mips_flush_host_tlb(1);

        return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
                                               u32 *opc,
                                               struct kvm_run *run,
                                               struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        }

        arch->pc = KVM_GUEST_KSEG0 + 0x180;

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (EXCCODE_CPU << CAUSEB_EXCCODE));
        kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));

        return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
                                              u32 *opc,
                                              struct kvm_run *run,
                                              struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

                kvm_change_c0_guest_cause(cop0, (0xff),
                                          (EXCCODE_RI << CAUSEB_EXCCODE));

                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;

        } else {
                kvm_err("Trying to deliver RI when EXL is already set\n");
                er = EMULATE_FAIL;
        }

        return er;
}
enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
                                              u32 *opc,
                                              struct kvm_run *run,
                                              struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

                kvm_change_c0_guest_cause(cop0, (0xff),
                                          (EXCCODE_BP << CAUSEB_EXCCODE));

                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;

        } else {
                kvm_err("Trying to deliver BP when EXL is already set\n");
                er = EMULATE_FAIL;
        }

        return er;
}
enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
                                                u32 *opc,
                                                struct kvm_run *run,
                                                struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);

                kvm_change_c0_guest_cause(cop0, (0xff),
                                          (EXCCODE_TR << CAUSEB_EXCCODE));

                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;

        } else {
                kvm_err("Trying to deliver TRAP when EXL is already set\n");
                er = EMULATE_FAIL;
        }

        return er;
}
enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_MSAFPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_FPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver FPE when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
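
/*
 * EXCCODE_MSAFPE above and EXCCODE_MSADIS below relate to the MIPS SIMD
 * Architecture (MSA) unit: MSA Disabled fires when the guest touches MSA
 * state while it is not enabled (Config5.MSAEn clear), and MSA Floating
 * Point covers trapped vector FP conditions, mirroring the scalar FPE
 * case above.
 */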
enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_MSADIS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver MSADIS when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
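
/*
 * Background for kvm_mips_handle_ri() below: RDHWR executed by the guest
 * traps to KVM as a Reserved Instruction exception (the hardware HWREna
 * bits are presumably left clear for guest context, so the guest cannot
 * read host values directly). The common case is guest user space
 * fetching its TLS pointer:
 *
 *	rdhwr	$3, $29		# HWR 29 (ULR), emulated from guest CP0 UserLocal
 *
 * The other registers emulated below are HWR 0 (CPUNum), 1 (SYNCI_Step),
 * 2 (CC, the cycle counter) and 3 (CCRes, its resolution).
 */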
enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
					 struct kvm_run *run,
					 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	union mips_instruction inst;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst.word = kvm_get_inst(opc, vcpu);

	if (inst.word == KVM_INVALID_INST) {
		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if (inst.r_format.opcode == spec3_op &&
	    inst.r_format.func == rdhwr_op &&
	    inst.r_format.rs == 0 &&
	    (inst.r_format.re >> 3) == 0) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = inst.r_format.rd;
		int rt = inst.r_format.rt;
		int sel = inst.r_format.re & 0x7;

		/* If usermode, check RDHWR rd is allowed by guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case MIPS_HWR_CPUNUM:		/* CPU number */
			arch->gprs[rt] = vcpu->vcpu_id;
			break;
		case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case MIPS_HWR_CC:		/* Read count register */
			arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
			break;
		case MIPS_HWR_CCRES:		/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case MIPS_HWR_ULR:		/* Read UserLocal register */
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;

		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}

		trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
			      vcpu->arch.gprs[rt]);
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n",
			  opc, inst.word);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Rollback PC (if in branch delay slot then the PC already points to
	 * branch target), and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}
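
/*
 * Completion of an MMIO load, run once userspace (e.g. QEMU) has filled
 * in run->mmio.data. vcpu->mmio_needed == 2 marks a sign-extending load
 * (LB/LH); anything else zero-extends (LBU/LHU). So a 1-byte MMIO read
 * returning 0x80 completes as 0xffffff80 in the destination GPR for LB
 * but as 0x00000080 for LBU. 4-byte loads always sign-extend, matching
 * LW semantics when the guest register is 64 bits wide.
 */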
enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
						  struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	/* Restore saved resume PC */
	vcpu->arch.pc = vcpu->arch.io_pc;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(s32 *) run->mmio.data;
		break;

	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *) run->mmio.data;
		else
			*gpr = *(u16 *) run->mmio.data;

		break;
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

done:
	return er;
}
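
/*
 * Generic fallback used by kvm_mips_check_privilege() below: deliver
 * whatever ExcCode is currently encoded in @cause to the guest, using the
 * same EPC/EXL/BD sequence as the dedicated helpers above, and propagate
 * the faulting address into the guest BadVAddr as well.
 */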
static enum emulation_result kvm_mips_emulate_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		kvm_err("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
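
/*
 * Privilege filter for exceptions taken while the guest is in user mode.
 * A guest-user access must not be fixed up against guest-kernel mappings,
 * so TLB misses on addresses at or above KVM_GUEST_KSEG0 are rewritten to
 * address errors (EXCCODE_ADEL/ADES) before being reflected, matching
 * what real hardware raises for a user-mode kseg access. Conversely,
 * address errors on the commpage become TLB faults, so the guest sees an
 * ordinary mapping failure rather than a KVM implementation detail.
 */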
enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;

	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case EXCCODE_INT:
		case EXCCODE_SYS:
		case EXCCODE_BP:
		case EXCCODE_RI:
		case EXCCODE_TR:
		case EXCCODE_MSAFPE:
		case EXCCODE_FPE:
		case EXCCODE_MSADIS:
			break;

		case EXCCODE_CPU:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case EXCCODE_MOD:
			break;

		case EXCCODE_TLBL:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_TLBS:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_ADES:
			kvm_debug("%s: address error ST @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		case EXCCODE_ADEL:
			kvm_debug("%s: address error LD @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL)
		kvm_mips_emulate_exc(cause, opc, run, vcpu);

	return er;
}
/*
 * User Address (UA) fault: this can happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 *     case we pass on the fault to the guest kernel and let it handle it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB
 */
enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr);

	/*
	 * KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB. Check the Guest TLB, if the entry is not there then
	 * send the guest an exception. The guest exc handler should then inject
	 * an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
		      (va & VPN2_MASK) |
		      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
		       KVM_ENTRYHI_ASID));
	if (index < 0) {
		if (exccode == EXCCODE_TLBL) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == EXCCODE_TLBS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			kvm_err("%s: invalid exc code: %d\n", __func__,
				exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/*
		 * Check if the entry is valid, if not then setup a TLB invalid
		 * exception to the guest
		 */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == EXCCODE_TLBL) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == EXCCODE_TLBS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				kvm_err("%s: invalid exc code: %d\n", __func__,
					exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
				  tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
			/*
			 * OK we have a Guest TLB entry, now inject it into the
			 * shadow host TLB
			 */
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, va, index, vcpu,