KVM: MIPS: Use CP0_BadInstr[P] for emulation
arch/mips/kvm/emulate.c
e685c689 1/*
d116e812
DCZ
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: Instruction/Exception emulation
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
e685c689
SL
11
12#include <linux/errno.h>
13#include <linux/err.h>
e30492bb 14#include <linux/ktime.h>
e685c689 15#include <linux/kvm_host.h>
e685c689
SL
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19#include <linux/random.h>
20#include <asm/page.h>
21#include <asm/cacheflush.h>
f4956f62 22#include <asm/cacheops.h>
e685c689
SL
23#include <asm/cpu-info.h>
24#include <asm/mmu_context.h>
25#include <asm/tlbflush.h>
26#include <asm/inst.h>
27
28#undef CONFIG_MIPS_MT
29#include <asm/r4kcache.h>
30#define CONFIG_MIPS_MT
31
d7d5b05f
DCZ
32#include "interrupt.h"
33#include "commpage.h"
e685c689
SL
34
35#include "trace.h"
36
37/*
 38 * Compute the return address and emulate the branch, if required.
 39 * This function should only be called when the faulting instruction is in a branch delay slot.
40 */
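/*
 * Branch targets below follow the usual MIPS rules: a taken branch resumes at
 * epc + 4 + (simmediate << 2), while a branch that is not taken falls through
 * past the delay slot to epc + 8.
 */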
122e51d4
JH
41static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
42 unsigned long *out)
e685c689
SL
43{
44 unsigned int dspcontrol;
45 union mips_instruction insn;
46 struct kvm_vcpu_arch *arch = &vcpu->arch;
47 long epc = instpc;
122e51d4
JH
48 long nextpc;
49 int err;
e685c689 50
122e51d4
JH
51 if (epc & 3) {
52 kvm_err("%s: unaligned epc\n", __func__);
53 return -EINVAL;
54 }
e685c689 55
d116e812 56 /* Read the instruction */
6a97c775 57 err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
122e51d4
JH
58 if (err)
59 return err;
e685c689
SL
60
61 switch (insn.i_format.opcode) {
d116e812 62 /* jr and jalr are in r_format. */
e685c689
SL
63 case spec_op:
64 switch (insn.r_format.func) {
65 case jalr_op:
66 arch->gprs[insn.r_format.rd] = epc + 8;
67 /* Fall through */
68 case jr_op:
69 nextpc = arch->gprs[insn.r_format.rs];
70 break;
122e51d4
JH
71 default:
72 return -EINVAL;
e685c689
SL
73 }
74 break;
75
76 /*
77 * This group contains:
78 * bltz_op, bgez_op, bltzl_op, bgezl_op,
79 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
80 */
81 case bcond_op:
82 switch (insn.i_format.rt) {
83 case bltz_op:
84 case bltzl_op:
85 if ((long)arch->gprs[insn.i_format.rs] < 0)
86 epc = epc + 4 + (insn.i_format.simmediate << 2);
87 else
88 epc += 8;
89 nextpc = epc;
90 break;
91
92 case bgez_op:
93 case bgezl_op:
94 if ((long)arch->gprs[insn.i_format.rs] >= 0)
95 epc = epc + 4 + (insn.i_format.simmediate << 2);
96 else
97 epc += 8;
98 nextpc = epc;
99 break;
100
101 case bltzal_op:
102 case bltzall_op:
103 arch->gprs[31] = epc + 8;
104 if ((long)arch->gprs[insn.i_format.rs] < 0)
105 epc = epc + 4 + (insn.i_format.simmediate << 2);
106 else
107 epc += 8;
108 nextpc = epc;
109 break;
110
111 case bgezal_op:
112 case bgezall_op:
113 arch->gprs[31] = epc + 8;
114 if ((long)arch->gprs[insn.i_format.rs] >= 0)
115 epc = epc + 4 + (insn.i_format.simmediate << 2);
116 else
117 epc += 8;
118 nextpc = epc;
119 break;
120 case bposge32_op:
122e51d4
JH
121 if (!cpu_has_dsp) {
122 kvm_err("%s: DSP branch but not DSP ASE\n",
123 __func__);
124 return -EINVAL;
125 }
e685c689
SL
126
127 dspcontrol = rddsp(0x01);
128
d116e812 129 if (dspcontrol >= 32)
e685c689 130 epc = epc + 4 + (insn.i_format.simmediate << 2);
d116e812 131 else
e685c689
SL
132 epc += 8;
133 nextpc = epc;
134 break;
122e51d4
JH
135 default:
136 return -EINVAL;
e685c689
SL
137 }
138 break;
139
d116e812 140 /* These are unconditional and in j_format. */
e685c689
SL
141 case jal_op:
142 arch->gprs[31] = instpc + 8;
143 case j_op:
144 epc += 4;
145 epc >>= 28;
146 epc <<= 28;
147 epc |= (insn.j_format.target << 2);
148 nextpc = epc;
149 break;
150
d116e812 151 /* These are conditional and in i_format. */
e685c689
SL
152 case beq_op:
153 case beql_op:
154 if (arch->gprs[insn.i_format.rs] ==
155 arch->gprs[insn.i_format.rt])
156 epc = epc + 4 + (insn.i_format.simmediate << 2);
157 else
158 epc += 8;
159 nextpc = epc;
160 break;
161
162 case bne_op:
163 case bnel_op:
164 if (arch->gprs[insn.i_format.rs] !=
165 arch->gprs[insn.i_format.rt])
166 epc = epc + 4 + (insn.i_format.simmediate << 2);
167 else
168 epc += 8;
169 nextpc = epc;
170 break;
171
2e0badfa
JH
172 case blez_op: /* POP06 */
173#ifndef CONFIG_CPU_MIPSR6
174 case blezl_op: /* removed in R6 */
175#endif
176 if (insn.i_format.rt != 0)
177 goto compact_branch;
e685c689
SL
178 if ((long)arch->gprs[insn.i_format.rs] <= 0)
179 epc = epc + 4 + (insn.i_format.simmediate << 2);
180 else
181 epc += 8;
182 nextpc = epc;
183 break;
184
2e0badfa
JH
185 case bgtz_op: /* POP07 */
186#ifndef CONFIG_CPU_MIPSR6
187 case bgtzl_op: /* removed in R6 */
188#endif
189 if (insn.i_format.rt != 0)
190 goto compact_branch;
e685c689
SL
191 if ((long)arch->gprs[insn.i_format.rs] > 0)
192 epc = epc + 4 + (insn.i_format.simmediate << 2);
193 else
194 epc += 8;
195 nextpc = epc;
196 break;
197
d116e812 198 /* And now the FPA/cp1 branch instructions. */
e685c689 199 case cop1_op:
6ad78a5c 200 kvm_err("%s: unsupported cop1_op\n", __func__);
122e51d4 201 return -EINVAL;
2e0badfa
JH
202
203#ifdef CONFIG_CPU_MIPSR6
204 /* R6 added the following compact branches with forbidden slots */
205 case blezl_op: /* POP26 */
206 case bgtzl_op: /* POP27 */
207 /* only rt == 0 isn't compact branch */
208 if (insn.i_format.rt != 0)
209 goto compact_branch;
122e51d4 210 return -EINVAL;
2e0badfa
JH
211 case pop10_op:
212 case pop30_op:
213 /* only rs == rt == 0 is reserved, rest are compact branches */
214 if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
215 goto compact_branch;
122e51d4 216 return -EINVAL;
2e0badfa
JH
217 case pop66_op:
218 case pop76_op:
219 /* only rs == 0 isn't compact branch */
220 if (insn.i_format.rs != 0)
221 goto compact_branch;
122e51d4 222 return -EINVAL;
2e0badfa
JH
223compact_branch:
224 /*
225 * If we've hit an exception on the forbidden slot, then
226 * the branch must not have been taken.
227 */
228 epc += 8;
229 nextpc = epc;
230 break;
231#else
232compact_branch:
122e51d4 233 /* Fall through - Compact branches not supported before R6 */
2e0badfa 234#endif
122e51d4
JH
235 default:
236 return -EINVAL;
e685c689
SL
237 }
238
122e51d4
JH
239 *out = nextpc;
240 return 0;
e685c689
SL
241}
242
bdb7ed86 243enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
e685c689 244{
122e51d4 245 int err;
e685c689
SL
246
247 if (cause & CAUSEF_BD) {
122e51d4
JH
248 err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
249 &vcpu->arch.pc);
250 if (err)
251 return EMULATE_FAIL;
252 } else {
e685c689 253 vcpu->arch.pc += 4;
122e51d4 254 }
e685c689
SL
255
256 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
257
122e51d4 258 return EMULATE_DONE;
e685c689
SL
259}
260
6a97c775
JH
261/**
262 * kvm_get_badinstr() - Get bad instruction encoding.
 263 * @opc: Guest pointer to faulting instruction.
 264 * @vcpu: KVM VCPU information.
 * @out: Output where the instruction encoding is written.
265 *
266 * Gets the instruction encoding of the faulting instruction, using the saved
267 * BadInstr register value if it exists, otherwise falling back to reading guest
268 * memory at @opc.
269 *
 270 * Returns: 0 on success, otherwise an error code from fetching the instruction.
271 */
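/*
 * Using the saved CP0_BadInstr value avoids re-fetching the instruction from
 * guest memory, which may have been modified or unmapped since the fault was
 * taken.
 */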
272int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
273{
274 if (cpu_has_badinstr) {
275 *out = vcpu->arch.host_cp0_badinstr;
276 return 0;
277 } else {
278 return kvm_get_inst(opc, vcpu, out);
279 }
280}
281
282/**
283 * kvm_get_badinstrp() - Get bad prior instruction encoding.
 284 * @opc: Guest pointer to prior faulting instruction.
 285 * @vcpu: KVM VCPU information.
 * @out: Output where the instruction encoding is written.
286 *
287 * Gets the instruction encoding of the prior faulting instruction (the branch
288 * containing the delay slot which faulted), using the saved BadInstrP register
289 * value if it exists, otherwise falling back to reading guest memory at @opc.
290 *
 291 * Returns: 0 on success, otherwise an error code from fetching the instruction.
292 */
293int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
294{
295 if (cpu_has_badinstrp) {
296 *out = vcpu->arch.host_cp0_badinstrp;
297 return 0;
298 } else {
299 return kvm_get_inst(opc, vcpu, out);
300 }
301}
302
e30492bb
JH
303/**
304 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
305 * @vcpu: Virtual CPU.
e685c689 306 *
f8239342
JH
307 * Returns: 1 if the CP0_Count timer is disabled by either the guest
308 * CP0_Cause.DC bit or the count_ctl.DC bit.
e30492bb 309 * 0 otherwise (in which case CP0_Count timer is running).
e685c689 310 */
e30492bb 311static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
e685c689
SL
312{
313 struct mips_coproc *cop0 = vcpu->arch.cop0;
d116e812 314
f8239342
JH
315 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
316 (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
e30492bb 317}
e685c689 318
e30492bb
JH
319/**
 320 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 * @vcpu: Virtual CPU.
 * @now: Kernel monotonic time to convert.
 321 *
322 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
323 *
324 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
325 */
bdb7ed86 326static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
e30492bb
JH
327{
328 s64 now_ns, periods;
329 u64 delta;
330
331 now_ns = ktime_to_ns(now);
332 delta = now_ns + vcpu->arch.count_dyn_bias;
333
334 if (delta >= vcpu->arch.count_period) {
335 /* If delta is out of safe range the bias needs adjusting */
336 periods = div64_s64(now_ns, vcpu->arch.count_period);
337 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
338 /* Recalculate delta with new bias */
339 delta = now_ns + vcpu->arch.count_dyn_bias;
e685c689
SL
340 }
341
e30492bb
JH
342 /*
343 * We've ensured that:
344 * delta < count_period
345 *
346 * Therefore the intermediate delta*count_hz will never overflow since
347 * at the boundary condition:
348 * delta = count_period
349 * delta = NSEC_PER_SEC * 2^32 / count_hz
350 * delta * count_hz = NSEC_PER_SEC * 2^32
351 */
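	/*
	 * Numerically: delta * count_hz < NSEC_PER_SEC * 2^32 ~ 4.3e18, which
	 * comfortably fits in the 64-bit intermediate (U64_MAX ~ 1.8e19).
	 */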
352 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
353}
354
f8239342
JH
355/**
356 * kvm_mips_count_time() - Get effective current time.
357 * @vcpu: Virtual CPU.
358 *
359 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
360 * except when the master disable bit is set in count_ctl, in which case it is
361 * count_resume, i.e. the time that the count was disabled.
362 *
363 * Returns: Effective monotonic ktime for CP0_Count.
364 */
365static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
366{
367 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
368 return vcpu->arch.count_resume;
369
370 return ktime_get();
371}
372
e30492bb
JH
373/**
374 * kvm_mips_read_count_running() - Read the current count value as if running.
375 * @vcpu: Virtual CPU.
376 * @now: Kernel time to read CP0_Count at.
377 *
 378 * Returns the current guest CP0_Count register value at time @now, and queues
 379 * the timer interrupt if it has become pending but hasn't been handled yet.
380 *
381 * Returns: The current value of the guest CP0_Count register.
382 */
bdb7ed86 383static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
e30492bb 384{
4355c44f
JH
385 struct mips_coproc *cop0 = vcpu->arch.cop0;
386 ktime_t expires, threshold;
8cffd197 387 u32 count, compare;
e30492bb
JH
388 int running;
389
4355c44f
JH
390 /* Calculate the biased and scaled guest CP0_Count */
391 count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
392 compare = kvm_read_c0_guest_compare(cop0);
393
394 /*
395 * Find whether CP0_Count has reached the closest timer interrupt. If
396 * not, we shouldn't inject it.
397 */
8cffd197 398 if ((s32)(count - compare) < 0)
4355c44f
JH
399 return count;
400
401 /*
402 * The CP0_Count we're going to return has already reached the closest
403 * timer interrupt. Quickly check if it really is a new interrupt by
404 * looking at whether the interval until the hrtimer expiry time is
405 * less than 1/4 of the timer period.
406 */
e30492bb 407 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
4355c44f
JH
408 threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
409 if (ktime_before(expires, threshold)) {
e30492bb
JH
410 /*
411 * Cancel it while we handle it so there's no chance of
412 * interference with the timeout handler.
413 */
414 running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
415
416 /* Nothing should be waiting on the timeout */
417 kvm_mips_callbacks->queue_timer_int(vcpu);
418
419 /*
420 * Restart the timer if it was running based on the expiry time
421 * we read, so that we don't push it back 2 periods.
422 */
423 if (running) {
424 expires = ktime_add_ns(expires,
425 vcpu->arch.count_period);
426 hrtimer_start(&vcpu->arch.comparecount_timer, expires,
427 HRTIMER_MODE_ABS);
428 }
429 }
430
4355c44f 431 return count;
e30492bb
JH
432}
433
434/**
435 * kvm_mips_read_count() - Read the current count value.
436 * @vcpu: Virtual CPU.
437 *
438 * Read the current guest CP0_Count value, taking into account whether the timer
439 * is stopped.
440 *
441 * Returns: The current guest CP0_Count value.
442 */
bdb7ed86 443u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
e30492bb
JH
444{
445 struct mips_coproc *cop0 = vcpu->arch.cop0;
446
447 /* If count disabled just read static copy of count */
448 if (kvm_mips_count_disabled(vcpu))
449 return kvm_read_c0_guest_count(cop0);
450
451 return kvm_mips_read_count_running(vcpu, ktime_get());
452}
453
454/**
455 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
456 * @vcpu: Virtual CPU.
457 * @count: Output pointer for CP0_Count value at point of freeze.
458 *
459 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
460 * at the point it was frozen. It is guaranteed that any pending interrupts at
461 * the point it was frozen are handled, and none after that point.
462 *
463 * This is useful where the time/CP0_Count is needed in the calculation of the
464 * new parameters.
465 *
466 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
467 *
468 * Returns: The ktime at the point of freeze.
469 */
bdb7ed86 470static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
e30492bb
JH
471{
472 ktime_t now;
473
474 /* stop hrtimer before finding time */
475 hrtimer_cancel(&vcpu->arch.comparecount_timer);
476 now = ktime_get();
477
478 /* find count at this point and handle pending hrtimer */
479 *count = kvm_mips_read_count_running(vcpu, now);
480
481 return now;
482}
483
e30492bb
JH
484/**
485 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
486 * @vcpu: Virtual CPU.
487 * @now: ktime at point of resume.
488 * @count: CP0_Count at point of resume.
489 *
490 * Resumes the timer and updates the timer expiry based on @now and @count.
 491 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
492 * parameters need to be changed.
493 *
494 * It is guaranteed that a timer interrupt immediately after resume will be
 495 * handled, but not if CP0_Compare is exactly at @count. That case is already
 496 * handled by kvm_mips_freeze_hrtimer().
497 *
498 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
499 */
500static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
bdb7ed86 501 ktime_t now, u32 count)
e30492bb
JH
502{
503 struct mips_coproc *cop0 = vcpu->arch.cop0;
8cffd197 504 u32 compare;
e30492bb
JH
505 u64 delta;
506 ktime_t expire;
507
508 /* Calculate timeout (wrap 0 to 2^32) */
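	/*
	 * The (u64)(u32)(compare - count - 1) + 1 form maps compare == count to
	 * a full 2^32-tick period rather than 0, so writing CP0_Compare equal
	 * to the current count schedules the interrupt one whole wrap ahead
	 * instead of firing immediately.
	 */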
509 compare = kvm_read_c0_guest_compare(cop0);
8cffd197 510 delta = (u64)(u32)(compare - count - 1) + 1;
e30492bb
JH
511 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
512 expire = ktime_add_ns(now, delta);
513
514 /* Update hrtimer to use new timeout */
515 hrtimer_cancel(&vcpu->arch.comparecount_timer);
516 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
517}
518
e30492bb
JH
519/**
520 * kvm_mips_write_count() - Modify the count and update timer.
521 * @vcpu: Virtual CPU.
522 * @count: Guest CP0_Count value to set.
523 *
524 * Sets the CP0_Count value and updates the timer accordingly.
525 */
bdb7ed86 526void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
e30492bb
JH
527{
528 struct mips_coproc *cop0 = vcpu->arch.cop0;
529 ktime_t now;
530
531 /* Calculate bias */
f8239342 532 now = kvm_mips_count_time(vcpu);
e30492bb
JH
533 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
534
535 if (kvm_mips_count_disabled(vcpu))
536 /* The timer's disabled, adjust the static count */
537 kvm_write_c0_guest_count(cop0, count);
538 else
539 /* Update timeout */
540 kvm_mips_resume_hrtimer(vcpu, now, count);
541}
542
543/**
544 * kvm_mips_init_count() - Initialise timer.
545 * @vcpu: Virtual CPU.
546 *
547 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
548 * it going if it's enabled.
549 */
550void kvm_mips_init_count(struct kvm_vcpu *vcpu)
551{
552 /* 100 MHz */
553 vcpu->arch.count_hz = 100*1000*1000;
554 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
555 vcpu->arch.count_hz);
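	/*
	 * count_period is the time for CP0_Count to wrap (2^32 ticks):
	 * roughly 42.95 seconds at 100 MHz.
	 */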
556 vcpu->arch.count_dyn_bias = 0;
557
558 /* Starting at 0 */
559 kvm_mips_write_count(vcpu, 0);
560}
561
f74a8e22
JH
562/**
563 * kvm_mips_set_count_hz() - Update the frequency of the timer.
564 * @vcpu: Virtual CPU.
565 * @count_hz: Frequency of CP0_Count timer in Hz.
566 *
567 * Change the frequency of the CP0_Count timer. This is done atomically so that
568 * CP0_Count is continuous and no timer interrupt is lost.
569 *
570 * Returns: -EINVAL if @count_hz is out of range.
571 * 0 on success.
572 */
573int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
574{
575 struct mips_coproc *cop0 = vcpu->arch.cop0;
576 int dc;
577 ktime_t now;
578 u32 count;
579
580 /* ensure the frequency is in a sensible range... */
581 if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
582 return -EINVAL;
583 /* ... and has actually changed */
584 if (vcpu->arch.count_hz == count_hz)
585 return 0;
586
587 /* Safely freeze timer so we can keep it continuous */
588 dc = kvm_mips_count_disabled(vcpu);
589 if (dc) {
590 now = kvm_mips_count_time(vcpu);
591 count = kvm_read_c0_guest_count(cop0);
592 } else {
593 now = kvm_mips_freeze_hrtimer(vcpu, &count);
594 }
595
596 /* Update the frequency */
597 vcpu->arch.count_hz = count_hz;
598 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
599 vcpu->arch.count_dyn_bias = 0;
600
601 /* Calculate adjusted bias so dynamic count is unchanged */
602 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
603
604 /* Update and resume hrtimer */
605 if (!dc)
606 kvm_mips_resume_hrtimer(vcpu, now, count);
607 return 0;
608}
609
e30492bb
JH
610/**
611 * kvm_mips_write_compare() - Modify compare and update timer.
612 * @vcpu: Virtual CPU.
613 * @compare: New CP0_Compare value.
b45bacd2 614 * @ack: Whether to acknowledge timer interrupt.
e30492bb
JH
615 *
616 * Update CP0_Compare to a new value and update the timeout.
b45bacd2
JH
617 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
618 * any pending timer interrupt is preserved.
e30492bb 619 */
bdb7ed86 620void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
e30492bb
JH
621{
622 struct mips_coproc *cop0 = vcpu->arch.cop0;
b45bacd2
JH
623 int dc;
624 u32 old_compare = kvm_read_c0_guest_compare(cop0);
625 ktime_t now;
8cffd197 626 u32 count;
e30492bb
JH
627
628 /* if unchanged, must just be an ack */
b45bacd2
JH
629 if (old_compare == compare) {
630 if (!ack)
631 return;
632 kvm_mips_callbacks->dequeue_timer_int(vcpu);
633 kvm_write_c0_guest_compare(cop0, compare);
e30492bb 634 return;
b45bacd2
JH
635 }
636
637 /* freeze_hrtimer() takes care of timer interrupts <= count */
638 dc = kvm_mips_count_disabled(vcpu);
639 if (!dc)
640 now = kvm_mips_freeze_hrtimer(vcpu, &count);
641
642 if (ack)
643 kvm_mips_callbacks->dequeue_timer_int(vcpu);
e30492bb 644
e30492bb
JH
645 kvm_write_c0_guest_compare(cop0, compare);
646
b45bacd2
JH
647 /* resume_hrtimer() takes care of timer interrupts > count */
648 if (!dc)
649 kvm_mips_resume_hrtimer(vcpu, now, count);
e30492bb
JH
650}
651
652/**
653 * kvm_mips_count_disable() - Disable count.
654 * @vcpu: Virtual CPU.
655 *
656 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
657 * time will be handled but not after.
658 *
f8239342
JH
659 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
660 * count_ctl.DC has been set (count disabled).
e30492bb
JH
661 *
662 * Returns: The time that the timer was stopped.
663 */
664static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
665{
666 struct mips_coproc *cop0 = vcpu->arch.cop0;
8cffd197 667 u32 count;
e30492bb
JH
668 ktime_t now;
669
670 /* Stop hrtimer */
671 hrtimer_cancel(&vcpu->arch.comparecount_timer);
672
673 /* Set the static count from the dynamic count, handling pending TI */
674 now = ktime_get();
675 count = kvm_mips_read_count_running(vcpu, now);
676 kvm_write_c0_guest_count(cop0, count);
677
678 return now;
679}
680
681/**
682 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
683 * @vcpu: Virtual CPU.
684 *
685 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
f8239342
JH
686 * before the final stop time will be handled if the timer isn't disabled by
687 * count_ctl.DC, but not after.
e30492bb
JH
688 *
689 * Assumes CP0_Cause.DC is clear (count enabled).
690 */
691void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
692{
693 struct mips_coproc *cop0 = vcpu->arch.cop0;
694
695 kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
f8239342
JH
696 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
697 kvm_mips_count_disable(vcpu);
e30492bb
JH
698}
699
700/**
701 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
702 * @vcpu: Virtual CPU.
703 *
704 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
f8239342
JH
705 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
706 * potentially before even returning, so the caller should be careful with
707 * ordering of CP0_Cause modifications so as not to lose it.
e30492bb
JH
708 *
709 * Assumes CP0_Cause.DC is set (count disabled).
710 */
711void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
712{
713 struct mips_coproc *cop0 = vcpu->arch.cop0;
8cffd197 714 u32 count;
e30492bb
JH
715
716 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
717
718 /*
719 * Set the dynamic count to match the static count.
f8239342
JH
720 * This starts the hrtimer if count_ctl.DC allows it.
721 * Otherwise it conveniently updates the biases.
e30492bb
JH
722 */
723 count = kvm_read_c0_guest_count(cop0);
724 kvm_mips_write_count(vcpu, count);
725}
726
f8239342
JH
727/**
728 * kvm_mips_set_count_ctl() - Update the count control KVM register.
729 * @vcpu: Virtual CPU.
730 * @count_ctl: Count control register new value.
731 *
732 * Set the count control KVM register. The timer is updated accordingly.
733 *
734 * Returns: -EINVAL if reserved bits are set.
735 * 0 on success.
736 */
737int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
738{
739 struct mips_coproc *cop0 = vcpu->arch.cop0;
740 s64 changed = count_ctl ^ vcpu->arch.count_ctl;
741 s64 delta;
742 ktime_t expire, now;
8cffd197 743 u32 count, compare;
f8239342
JH
744
745 /* Only allow defined bits to be changed */
746 if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
747 return -EINVAL;
748
749 /* Apply new value */
750 vcpu->arch.count_ctl = count_ctl;
751
752 /* Master CP0_Count disable */
753 if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
754 /* Is CP0_Cause.DC already disabling CP0_Count? */
755 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
756 if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
757 /* Just record the current time */
758 vcpu->arch.count_resume = ktime_get();
759 } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
760 /* disable timer and record current time */
761 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
762 } else {
763 /*
764 * Calculate timeout relative to static count at resume
765 * time (wrap 0 to 2^32).
766 */
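			/*
			 * Same wrap handling as kvm_mips_resume_hrtimer():
			 * compare == count maps to a full 2^32-tick period
			 * rather than an immediate expiry.
			 */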
767 count = kvm_read_c0_guest_count(cop0);
768 compare = kvm_read_c0_guest_compare(cop0);
8cffd197 769 delta = (u64)(u32)(compare - count - 1) + 1;
f8239342
JH
770 delta = div_u64(delta * NSEC_PER_SEC,
771 vcpu->arch.count_hz);
772 expire = ktime_add_ns(vcpu->arch.count_resume, delta);
773
774 /* Handle pending interrupt */
775 now = ktime_get();
776 if (ktime_compare(now, expire) >= 0)
777 /* Nothing should be waiting on the timeout */
778 kvm_mips_callbacks->queue_timer_int(vcpu);
779
780 /* Resume hrtimer without changing bias */
781 count = kvm_mips_read_count_running(vcpu, now);
782 kvm_mips_resume_hrtimer(vcpu, now, count);
783 }
784 }
785
786 return 0;
787}
788
789/**
790 * kvm_mips_set_count_resume() - Update the count resume KVM register.
791 * @vcpu: Virtual CPU.
792 * @count_resume: Count resume register new value.
793 *
794 * Set the count resume KVM register.
795 *
796 * Returns: -EINVAL if out of valid range (0..now).
797 * 0 on success.
798 */
799int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
800{
801 /*
802 * It doesn't make sense for the resume time to be in the future, as it
803 * would be possible for the next interrupt to be more than a full
804 * period in the future.
805 */
806 if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
807 return -EINVAL;
808
809 vcpu->arch.count_resume = ns_to_ktime(count_resume);
810 return 0;
811}
812
e30492bb
JH
813/**
814 * kvm_mips_count_timeout() - Push timer forward on timeout.
815 * @vcpu: Virtual CPU.
816 *
 817 * Handle an hrtimer event by pushing the hrtimer forward one period.
818 *
819 * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
820 */
821enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
822{
823 /* Add the Count period to the current expiry time */
824 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
825 vcpu->arch.count_period);
826 return HRTIMER_RESTART;
e685c689
SL
827}
828
829enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
830{
831 struct mips_coproc *cop0 = vcpu->arch.cop0;
832 enum emulation_result er = EMULATE_DONE;
833
ede5f3e7
JH
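	/*
	 * Per the MIPS privileged architecture, ERET returns via ErrorEPC when
	 * Status.ERL is set, otherwise via EPC when Status.EXL is set; ERL
	 * takes priority, matching the order of the checks below.
	 */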
834 if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
835 kvm_clear_c0_guest_status(cop0, ST0_ERL);
836 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
837 } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
e685c689
SL
838 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
839 kvm_read_c0_guest_epc(cop0));
840 kvm_clear_c0_guest_status(cop0, ST0_EXL);
841 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
842
e685c689 843 } else {
6ad78a5c
DCZ
844 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
845 vcpu->arch.pc);
e685c689
SL
846 er = EMULATE_FAIL;
847 }
848
849 return er;
850}
851
852enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
853{
e685c689
SL
854 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
855 vcpu->arch.pending_exceptions);
856
857 ++vcpu->stat.wait_exits;
1e09e86a 858 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
e685c689
SL
859 if (!vcpu->arch.pending_exceptions) {
860 vcpu->arch.wait = 1;
861 kvm_vcpu_block(vcpu);
862
d116e812
DCZ
863 /*
 864 * If we are runnable, then definitely go off to user space to
865 * check if any I/O interrupts are pending.
e685c689
SL
866 */
867 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
868 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
869 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
870 }
871 }
872
d98403a5 873 return EMULATE_DONE;
e685c689
SL
874}
875
d116e812
DCZ
876/*
 877 * XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so
 878 * that we can catch it if things ever change
e685c689
SL
879 */
880enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
881{
882 struct mips_coproc *cop0 = vcpu->arch.cop0;
8cffd197 883 unsigned long pc = vcpu->arch.pc;
e685c689 884
8cffd197 885 kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
d98403a5 886 return EMULATE_FAIL;
e685c689
SL
887}
888
91e4f1b6
JH
889/**
890 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
891 * @vcpu: VCPU with changed mappings.
892 * @tlb: TLB entry being removed.
893 *
894 * This is called to indicate a single change in guest MMU mappings, so that we
895 * can arrange TLB flushes on this and other CPUs.
896 */
897static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
898 struct kvm_mips_tlb *tlb)
899{
c550d539
JH
900 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
901 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
91e4f1b6
JH
902 int cpu, i;
903 bool user;
904
905 /* No need to flush for entries which are already invalid */
906 if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
907 return;
aba85929
JH
908 /* Don't touch host kernel page tables or TLB mappings */
909 if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
910 return;
91e4f1b6
JH
911 /* User address space doesn't need flushing for KSeg2/3 changes */
912 user = tlb->tlb_hi < KVM_GUEST_KSEG0;
913
914 preempt_disable();
915
aba85929
JH
916 /* Invalidate page table entries */
917 kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);
918
91e4f1b6
JH
919 /*
920 * Probe the shadow host TLB for the entry being overwritten, if one
921 * matches, invalidate it
922 */
57e3869c 923 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);
91e4f1b6
JH
924
925 /* Invalidate the whole ASID on other CPUs */
926 cpu = smp_processor_id();
927 for_each_possible_cpu(i) {
928 if (i == cpu)
929 continue;
930 if (user)
c550d539
JH
931 cpu_context(i, user_mm) = 0;
932 cpu_context(i, kern_mm) = 0;
91e4f1b6
JH
933 }
934
935 preempt_enable();
936}
937
e685c689
SL
938/* Write Guest TLB Entry @ Index */
939enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
940{
941 struct mips_coproc *cop0 = vcpu->arch.cop0;
942 int index = kvm_read_c0_guest_index(cop0);
e685c689 943 struct kvm_mips_tlb *tlb = NULL;
8cffd197 944 unsigned long pc = vcpu->arch.pc;
e685c689
SL
945
946 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
6ad78a5c 947 kvm_debug("%s: illegal index: %d\n", __func__, index);
8cffd197 948 kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
6ad78a5c
DCZ
949 pc, index, kvm_read_c0_guest_entryhi(cop0),
950 kvm_read_c0_guest_entrylo0(cop0),
951 kvm_read_c0_guest_entrylo1(cop0),
952 kvm_read_c0_guest_pagemask(cop0));
e685c689
SL
953 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
954 }
955
956 tlb = &vcpu->arch.guest_tlb[index];
91e4f1b6
JH
957
958 kvm_mips_invalidate_guest_tlb(vcpu, tlb);
e685c689
SL
959
960 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
961 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
9fbfb06a
JH
962 tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
963 tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
e685c689 964
8cffd197 965 kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
d116e812
DCZ
966 pc, index, kvm_read_c0_guest_entryhi(cop0),
967 kvm_read_c0_guest_entrylo0(cop0),
968 kvm_read_c0_guest_entrylo1(cop0),
969 kvm_read_c0_guest_pagemask(cop0));
e685c689 970
d98403a5 971 return EMULATE_DONE;
e685c689
SL
972}
973
974/* Write Guest TLB Entry @ Random Index */
975enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
976{
977 struct mips_coproc *cop0 = vcpu->arch.cop0;
e685c689 978 struct kvm_mips_tlb *tlb = NULL;
8cffd197 979 unsigned long pc = vcpu->arch.pc;
e685c689
SL
980 int index;
981
e685c689
SL
982 get_random_bytes(&index, sizeof(index));
983 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
e685c689 984
e685c689
SL
985 tlb = &vcpu->arch.guest_tlb[index];
986
91e4f1b6 987 kvm_mips_invalidate_guest_tlb(vcpu, tlb);
e685c689
SL
988
989 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
990 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
9fbfb06a
JH
991 tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
992 tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
e685c689 993
8cffd197 994 kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
d116e812
DCZ
995 pc, index, kvm_read_c0_guest_entryhi(cop0),
996 kvm_read_c0_guest_entrylo0(cop0),
997 kvm_read_c0_guest_entrylo1(cop0));
e685c689 998
d98403a5 999 return EMULATE_DONE;
e685c689
SL
1000}
1001
1002enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
1003{
1004 struct mips_coproc *cop0 = vcpu->arch.cop0;
1005 long entryhi = kvm_read_c0_guest_entryhi(cop0);
8cffd197 1006 unsigned long pc = vcpu->arch.pc;
e685c689
SL
1007 int index = -1;
1008
1009 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1010
1011 kvm_write_c0_guest_index(cop0, index);
1012
8cffd197 1013 kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
e685c689
SL
1014 index);
1015
d98403a5 1016 return EMULATE_DONE;
e685c689
SL
1017}
1018
c771607a
JH
1019/**
1020 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
1021 * @vcpu: Virtual CPU.
1022 *
1023 * Finds the mask of bits which are writable in the guest's Config1 CP0
1024 * register, by userland (currently read-only to the guest).
1025 */
1026unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
1027{
6cdc65e3
JH
1028 unsigned int mask = 0;
1029
1030 /* Permit FPU to be present if FPU is supported */
1031 if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
1032 mask |= MIPS_CONF1_FP;
1033
1034 return mask;
c771607a
JH
1035}
1036
1037/**
1038 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
1039 * @vcpu: Virtual CPU.
1040 *
1041 * Finds the mask of bits which are writable in the guest's Config3 CP0
1042 * register, by userland (currently read-only to the guest).
1043 */
1044unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
1045{
cef061d0
JH
1046 /* Config4 and ULRI are optional */
1047 unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
2b6009d6
JH
1048
1049 /* Permit MSA to be present if MSA is supported */
1050 if (kvm_mips_guest_can_have_msa(&vcpu->arch))
1051 mask |= MIPS_CONF3_MSA;
1052
1053 return mask;
c771607a
JH
1054}
1055
1056/**
1057 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
1058 * @vcpu: Virtual CPU.
1059 *
1060 * Finds the mask of bits which are writable in the guest's Config4 CP0
1061 * register, by userland (currently read-only to the guest).
1062 */
1063unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
1064{
1065 /* Config5 is optional */
05108709
JH
1066 unsigned int mask = MIPS_CONF_M;
1067
1068 /* KScrExist */
1069 mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;
1070
1071 return mask;
c771607a
JH
1072}
1073
1074/**
1075 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
1076 * @vcpu: Virtual CPU.
1077 *
1078 * Finds the mask of bits which are writable in the guest's Config5 CP0
1079 * register, by the guest itself.
1080 */
1081unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
1082{
6cdc65e3
JH
1083 unsigned int mask = 0;
1084
2b6009d6
JH
1085 /* Permit MSAEn changes if MSA supported and enabled */
1086 if (kvm_mips_guest_has_msa(&vcpu->arch))
1087 mask |= MIPS_CONF5_MSAEN;
1088
6cdc65e3
JH
1089 /*
1090 * Permit guest FPU mode changes if FPU is enabled and the relevant
1091 * feature exists according to FIR register.
1092 */
1093 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
1094 if (cpu_has_fre)
1095 mask |= MIPS_CONF5_FRE;
1096 /* We don't support UFR or UFE */
1097 }
1098
1099 return mask;
c771607a
JH
1100}
1101
258f3a2e
JH
1102enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
1103 u32 *opc, u32 cause,
bdb7ed86 1104 struct kvm_run *run,
d116e812 1105 struct kvm_vcpu *vcpu)
e685c689
SL
1106{
1107 struct mips_coproc *cop0 = vcpu->arch.cop0;
c550d539 1108 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
e685c689 1109 enum emulation_result er = EMULATE_DONE;
258f3a2e 1110 u32 rt, rd, sel;
e685c689 1111 unsigned long curr_pc;
91e4f1b6 1112 int cpu, i;
e685c689
SL
1113
1114 /*
1115 * Update PC and hold onto current PC in case there is
1116 * an error and we want to rollback the PC
1117 */
1118 curr_pc = vcpu->arch.pc;
1119 er = update_pc(vcpu, cause);
d116e812 1120 if (er == EMULATE_FAIL)
e685c689 1121 return er;
e685c689 1122
258f3a2e
JH
1123 if (inst.co_format.co) {
1124 switch (inst.co_format.func) {
e685c689
SL
1125 case tlbr_op: /* Read indexed TLB entry */
1126 er = kvm_mips_emul_tlbr(vcpu);
1127 break;
1128 case tlbwi_op: /* Write indexed */
1129 er = kvm_mips_emul_tlbwi(vcpu);
1130 break;
1131 case tlbwr_op: /* Write random */
1132 er = kvm_mips_emul_tlbwr(vcpu);
1133 break;
1134 case tlbp_op: /* TLB Probe */
1135 er = kvm_mips_emul_tlbp(vcpu);
1136 break;
1137 case rfe_op:
6ad78a5c 1138 kvm_err("!!!COP0_RFE!!!\n");
e685c689
SL
1139 break;
1140 case eret_op:
1141 er = kvm_mips_emul_eret(vcpu);
1142 goto dont_update_pc;
e685c689
SL
1143 case wait_op:
1144 er = kvm_mips_emul_wait(vcpu);
1145 break;
1146 }
1147 } else {
258f3a2e
JH
1148 rt = inst.c0r_format.rt;
1149 rd = inst.c0r_format.rd;
1150 sel = inst.c0r_format.sel;
1151
1152 switch (inst.c0r_format.rs) {
e685c689
SL
1153 case mfc_op:
1154#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1155 cop0->stat[rd][sel]++;
1156#endif
1157 /* Get reg */
1158 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
172e02d1
JH
1159 vcpu->arch.gprs[rt] =
1160 (s32)kvm_mips_read_count(vcpu);
e685c689
SL
1161 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
1162 vcpu->arch.gprs[rt] = 0x0;
1163#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1164 kvm_mips_trans_mfc0(inst, opc, vcpu);
1165#endif
d116e812 1166 } else {
172e02d1 1167 vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
e685c689
SL
1168
1169#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1170 kvm_mips_trans_mfc0(inst, opc, vcpu);
1171#endif
1172 }
1173
6398da13
JH
1174 trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
1175 KVM_TRACE_COP0(rd, sel),
1176 vcpu->arch.gprs[rt]);
e685c689
SL
1177 break;
1178
1179 case dmfc_op:
1180 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
6398da13
JH
1181
1182 trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
1183 KVM_TRACE_COP0(rd, sel),
1184 vcpu->arch.gprs[rt]);
e685c689
SL
1185 break;
1186
1187 case mtc_op:
1188#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1189 cop0->stat[rd][sel]++;
1190#endif
6398da13
JH
1191 trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
1192 KVM_TRACE_COP0(rd, sel),
1193 vcpu->arch.gprs[rt]);
1194
e685c689
SL
1195 if ((rd == MIPS_CP0_TLB_INDEX)
1196 && (vcpu->arch.gprs[rt] >=
1197 KVM_MIPS_GUEST_TLB_SIZE)) {
6ad78a5c
DCZ
1198 kvm_err("Invalid TLB Index: %ld",
1199 vcpu->arch.gprs[rt]);
e685c689
SL
1200 er = EMULATE_FAIL;
1201 break;
1202 }
1203#define C0_EBASE_CORE_MASK 0xff
1204 if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
1205 /* Preserve CORE number */
1206 kvm_change_c0_guest_ebase(cop0,
1207 ~(C0_EBASE_CORE_MASK),
1208 vcpu->arch.gprs[rt]);
6ad78a5c
DCZ
1209 kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
1210 kvm_read_c0_guest_ebase(cop0));
e685c689 1211 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
8cffd197 1212 u32 nasid =
ca64c2be 1213 vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
bf18db4e 1214 if (((kvm_read_c0_guest_entryhi(cop0) &
ca64c2be 1215 KVM_ENTRYHI_ASID) != nasid)) {
9887d1c7 1216 trace_kvm_asid_change(vcpu,
d116e812 1217 kvm_read_c0_guest_entryhi(cop0)
9887d1c7
JH
1218 & KVM_ENTRYHI_ASID,
1219 nasid);
e685c689 1220
a31b50d7
JH
1221 /*
1222 * Flush entries from the GVA page
1223 * tables.
1224 * Guest user page table will get
1225 * flushed lazily on re-entry to guest
1226 * user if the guest ASID actually
1227 * changes.
1228 */
1229 kvm_mips_flush_gva_pt(kern_mm->pgd,
1230 KMF_KERN);
1231
25b08c7f
JH
1232 /*
1233 * Regenerate/invalidate kernel MMU
1234 * context.
1235 * The user MMU context will be
1236 * regenerated lazily on re-entry to
1237 * guest user if the guest ASID actually
1238 * changes.
1239 */
91e4f1b6 1240 preempt_disable();
91e4f1b6 1241 cpu = smp_processor_id();
a98dd741 1242 get_new_mmu_context(kern_mm, cpu);
91e4f1b6 1243 for_each_possible_cpu(i)
25b08c7f 1244 if (i != cpu)
c550d539 1245 cpu_context(i, kern_mm) = 0;
91e4f1b6 1246 preempt_enable();
e685c689
SL
1247 }
1248 kvm_write_c0_guest_entryhi(cop0,
1249 vcpu->arch.gprs[rt]);
1250 }
1251 /* Are we writing to COUNT */
1252 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
e30492bb 1253 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
e685c689
SL
1254 goto done;
1255 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
e685c689
SL
1256 /* If we are writing to COMPARE */
1257 /* Clear pending timer interrupt, if any */
e30492bb 1258 kvm_mips_write_compare(vcpu,
b45bacd2
JH
1259 vcpu->arch.gprs[rt],
1260 true);
e685c689 1261 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
6cdc65e3
JH
1262 unsigned int old_val, val, change;
1263
1264 old_val = kvm_read_c0_guest_status(cop0);
1265 val = vcpu->arch.gprs[rt];
1266 change = val ^ old_val;
1267
1268 /* Make sure that the NMI bit is never set */
1269 val &= ~ST0_NMI;
1270
1271 /*
1272 * Don't allow CU1 or FR to be set unless FPU
1273 * capability enabled and exists in guest
1274 * configuration.
1275 */
1276 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1277 val &= ~(ST0_CU1 | ST0_FR);
1278
1279 /*
1280 * Also don't allow FR to be set if host doesn't
1281 * support it.
1282 */
1283 if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
1284 val &= ~ST0_FR;
1285
1286
1287 /* Handle changes in FPU mode */
1288 preempt_disable();
1289
1290 /*
1291 * FPU and Vector register state is made
1292 * UNPREDICTABLE by a change of FR, so don't
1293 * even bother saving it.
1294 */
1295 if (change & ST0_FR)
1296 kvm_drop_fpu(vcpu);
1297
2b6009d6
JH
1298 /*
1299 * If MSA state is already live, it is undefined
1300 * how it interacts with FR=0 FPU state, and we
1301 * don't want to hit reserved instruction
1302 * exceptions trying to save the MSA state later
1303 * when CU=1 && FR=1, so play it safe and save
1304 * it first.
1305 */
1306 if (change & ST0_CU1 && !(val & ST0_FR) &&
f943176a 1307 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
2b6009d6
JH
1308 kvm_lose_fpu(vcpu);
1309
d116e812 1310 /*
6cdc65e3
JH
1311 * Propagate CU1 (FPU enable) changes
1312 * immediately if the FPU context is already
1313 * loaded. When disabling we leave the context
1314 * loaded so it can be quickly enabled again in
1315 * the near future.
d116e812 1316 */
6cdc65e3 1317 if (change & ST0_CU1 &&
f943176a 1318 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
6cdc65e3
JH
1319 change_c0_status(ST0_CU1, val);
1320
1321 preempt_enable();
1322
1323 kvm_write_c0_guest_status(cop0, val);
e685c689
SL
1324
1325#ifdef CONFIG_KVM_MIPS_DYN_TRANS
6cdc65e3
JH
1326 /*
1327 * If FPU present, we need CU1/FR bits to take
1328 * effect fairly soon.
1329 */
1330 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1331 kvm_mips_trans_mtc0(inst, opc, vcpu);
e685c689 1332#endif
6cdc65e3
JH
1333 } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1334 unsigned int old_val, val, change, wrmask;
1335
1336 old_val = kvm_read_c0_guest_config5(cop0);
1337 val = vcpu->arch.gprs[rt];
1338
1339 /* Only a few bits are writable in Config5 */
1340 wrmask = kvm_mips_config5_wrmask(vcpu);
1341 change = (val ^ old_val) & wrmask;
1342 val = old_val ^ change;
1343
1344
2b6009d6 1345 /* Handle changes in FPU/MSA modes */
6cdc65e3
JH
1346 preempt_disable();
1347
1348 /*
1349 * Propagate FRE changes immediately if the FPU
1350 * context is already loaded.
1351 */
1352 if (change & MIPS_CONF5_FRE &&
f943176a 1353 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
6cdc65e3
JH
1354 change_c0_config5(MIPS_CONF5_FRE, val);
1355
2b6009d6
JH
1356 /*
1357 * Propagate MSAEn changes immediately if the
1358 * MSA context is already loaded. When disabling
1359 * we leave the context loaded so it can be
1360 * quickly enabled again in the near future.
1361 */
1362 if (change & MIPS_CONF5_MSAEN &&
f943176a 1363 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
2b6009d6
JH
1364 change_c0_config5(MIPS_CONF5_MSAEN,
1365 val);
1366
6cdc65e3
JH
1367 preempt_enable();
1368
1369 kvm_write_c0_guest_config5(cop0, val);
e30492bb 1370 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
8cffd197 1371 u32 old_cause, new_cause;
d116e812 1372
e30492bb
JH
1373 old_cause = kvm_read_c0_guest_cause(cop0);
1374 new_cause = vcpu->arch.gprs[rt];
1375 /* Update R/W bits */
1376 kvm_change_c0_guest_cause(cop0, 0x08800300,
1377 new_cause);
1378 /* DC bit enabling/disabling timer? */
1379 if ((old_cause ^ new_cause) & CAUSEF_DC) {
1380 if (new_cause & CAUSEF_DC)
1381 kvm_mips_count_disable_cause(vcpu);
1382 else
1383 kvm_mips_count_enable_cause(vcpu);
1384 }
cef061d0
JH
1385 } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
1386 u32 mask = MIPS_HWRENA_CPUNUM |
1387 MIPS_HWRENA_SYNCISTEP |
1388 MIPS_HWRENA_CC |
1389 MIPS_HWRENA_CCRES;
1390
1391 if (kvm_read_c0_guest_config3(cop0) &
1392 MIPS_CONF3_ULRI)
1393 mask |= MIPS_HWRENA_ULR;
1394 cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
e685c689
SL
1395 } else {
1396 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
1397#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1398 kvm_mips_trans_mtc0(inst, opc, vcpu);
1399#endif
1400 }
e685c689
SL
1401 break;
1402
1403 case dmtc_op:
6ad78a5c
DCZ
1404 kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
1405 vcpu->arch.pc, rt, rd, sel);
6398da13
JH
1406 trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
1407 KVM_TRACE_COP0(rd, sel),
1408 vcpu->arch.gprs[rt]);
e685c689
SL
1409 er = EMULATE_FAIL;
1410 break;
1411
b2c59635 1412 case mfmc0_op:
e685c689
SL
1413#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
1414 cop0->stat[MIPS_CP0_STATUS][0]++;
1415#endif
caa1faa7 1416 if (rt != 0)
e685c689
SL
1417 vcpu->arch.gprs[rt] =
1418 kvm_read_c0_guest_status(cop0);
e685c689 1419 /* EI */
258f3a2e 1420 if (inst.mfmc0_format.sc) {
b2c59635 1421 kvm_debug("[%#lx] mfmc0_op: EI\n",
e685c689
SL
1422 vcpu->arch.pc);
1423 kvm_set_c0_guest_status(cop0, ST0_IE);
1424 } else {
b2c59635 1425 kvm_debug("[%#lx] mfmc0_op: DI\n",
e685c689
SL
1426 vcpu->arch.pc);
1427 kvm_clear_c0_guest_status(cop0, ST0_IE);
1428 }
1429
1430 break;
1431
1432 case wrpgpr_op:
1433 {
8cffd197
JH
1434 u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
1435 u32 pss =
e685c689 1436 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
d116e812
DCZ
1437 /*
1438 * We don't support any shadow register sets, so
 1439 * SRSCtl[PSS] == SRSCtl[CSS] == 0
1440 */
e685c689
SL
1441 if (css || pss) {
1442 er = EMULATE_FAIL;
1443 break;
1444 }
1445 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
1446 vcpu->arch.gprs[rt]);
1447 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1448 }
1449 break;
1450 default:
6ad78a5c 1451 kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
258f3a2e 1452 vcpu->arch.pc, inst.c0r_format.rs);
e685c689
SL
1453 er = EMULATE_FAIL;
1454 break;
1455 }
1456 }
1457
1458done:
d116e812
DCZ
1459 /* Rollback PC only if emulation was unsuccessful */
1460 if (er == EMULATE_FAIL)
e685c689 1461 vcpu->arch.pc = curr_pc;
e685c689
SL
1462
1463dont_update_pc:
1464 /*
1465 * This is for special instructions whose emulation
1466 * updates the PC, so do not overwrite the PC under
1467 * any circumstances
1468 */
1469
1470 return er;
1471}
1472
258f3a2e
JH
1473enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
1474 u32 cause,
d116e812
DCZ
1475 struct kvm_run *run,
1476 struct kvm_vcpu *vcpu)
e685c689
SL
1477{
1478 enum emulation_result er = EMULATE_DO_MMIO;
258f3a2e 1479 u32 rt;
8cffd197 1480 u32 bytes;
e685c689
SL
1481 void *data = run->mmio.data;
1482 unsigned long curr_pc;
1483
1484 /*
1485 * Update PC and hold onto current PC in case there is
1486 * an error and we want to rollback the PC
1487 */
1488 curr_pc = vcpu->arch.pc;
1489 er = update_pc(vcpu, cause);
1490 if (er == EMULATE_FAIL)
1491 return er;
1492
258f3a2e 1493 rt = inst.i_format.rt;
e685c689 1494
258f3a2e 1495 switch (inst.i_format.opcode) {
e685c689
SL
1496 case sb_op:
1497 bytes = 1;
1498 if (bytes > sizeof(run->mmio.data)) {
1499 kvm_err("%s: bad MMIO length: %d\n", __func__,
1500 run->mmio.len);
1501 }
1502 run->mmio.phys_addr =
1503 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1504 host_cp0_badvaddr);
1505 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1506 er = EMULATE_FAIL;
1507 break;
1508 }
1509 run->mmio.len = bytes;
1510 run->mmio.is_write = 1;
1511 vcpu->mmio_needed = 1;
1512 vcpu->mmio_is_write = 1;
1513 *(u8 *) data = vcpu->arch.gprs[rt];
1514 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1515 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
8cffd197 1516 *(u8 *) data);
e685c689
SL
1517
1518 break;
1519
1520 case sw_op:
1521 bytes = 4;
1522 if (bytes > sizeof(run->mmio.data)) {
1523 kvm_err("%s: bad MMIO length: %d\n", __func__,
1524 run->mmio.len);
1525 }
1526 run->mmio.phys_addr =
1527 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1528 host_cp0_badvaddr);
1529 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1530 er = EMULATE_FAIL;
1531 break;
1532 }
1533
1534 run->mmio.len = bytes;
1535 run->mmio.is_write = 1;
1536 vcpu->mmio_needed = 1;
1537 vcpu->mmio_is_write = 1;
8cffd197 1538 *(u32 *) data = vcpu->arch.gprs[rt];
e685c689
SL
1539
1540 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1541 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
8cffd197 1542 vcpu->arch.gprs[rt], *(u32 *) data);
e685c689
SL
1543 break;
1544
1545 case sh_op:
1546 bytes = 2;
1547 if (bytes > sizeof(run->mmio.data)) {
1548 kvm_err("%s: bad MMIO length: %d\n", __func__,
1549 run->mmio.len);
1550 }
1551 run->mmio.phys_addr =
1552 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1553 host_cp0_badvaddr);
1554 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1555 er = EMULATE_FAIL;
1556 break;
1557 }
1558
1559 run->mmio.len = bytes;
1560 run->mmio.is_write = 1;
1561 vcpu->mmio_needed = 1;
1562 vcpu->mmio_is_write = 1;
8cffd197 1563 *(u16 *) data = vcpu->arch.gprs[rt];
e685c689
SL
1564
1565 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1566 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
8cffd197 1567 vcpu->arch.gprs[rt], *(u32 *) data);
e685c689
SL
1568 break;
1569
1570 default:
d86c1ebe 1571 kvm_err("Store not yet supported (inst=0x%08x)\n",
258f3a2e 1572 inst.word);
e685c689
SL
1573 er = EMULATE_FAIL;
1574 break;
1575 }
1576
d116e812
DCZ
1577 /* Rollback PC if emulation was unsuccessful */
1578 if (er == EMULATE_FAIL)
e685c689 1579 vcpu->arch.pc = curr_pc;
e685c689
SL
1580
1581 return er;
1582}
1583
258f3a2e
JH
1584enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1585 u32 cause, struct kvm_run *run,
d116e812 1586 struct kvm_vcpu *vcpu)
e685c689
SL
1587{
1588 enum emulation_result er = EMULATE_DO_MMIO;
e1e575f6 1589 unsigned long curr_pc;
258f3a2e 1590 u32 op, rt;
8cffd197 1591 u32 bytes;
e685c689 1592
258f3a2e
JH
1593 rt = inst.i_format.rt;
1594 op = inst.i_format.opcode;
e685c689 1595
e1e575f6
JH
1596 /*
1597 * Find the resume PC now while we have safe and easy access to the
1598 * prior branch instruction, and save it for
1599 * kvm_mips_complete_mmio_load() to restore later.
1600 */
1601 curr_pc = vcpu->arch.pc;
1602 er = update_pc(vcpu, cause);
1603 if (er == EMULATE_FAIL)
1604 return er;
1605 vcpu->arch.io_pc = vcpu->arch.pc;
1606 vcpu->arch.pc = curr_pc;
1607
e685c689
SL
1608 vcpu->arch.io_gpr = rt;
1609
1610 switch (op) {
1611 case lw_op:
1612 bytes = 4;
1613 if (bytes > sizeof(run->mmio.data)) {
1614 kvm_err("%s: bad MMIO length: %d\n", __func__,
1615 run->mmio.len);
1616 er = EMULATE_FAIL;
1617 break;
1618 }
1619 run->mmio.phys_addr =
1620 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1621 host_cp0_badvaddr);
1622 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1623 er = EMULATE_FAIL;
1624 break;
1625 }
1626
1627 run->mmio.len = bytes;
1628 run->mmio.is_write = 0;
1629 vcpu->mmio_needed = 1;
1630 vcpu->mmio_is_write = 0;
1631 break;
1632
1633 case lh_op:
1634 case lhu_op:
1635 bytes = 2;
1636 if (bytes > sizeof(run->mmio.data)) {
1637 kvm_err("%s: bad MMIO length: %d\n", __func__,
1638 run->mmio.len);
1639 er = EMULATE_FAIL;
1640 break;
1641 }
1642 run->mmio.phys_addr =
1643 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1644 host_cp0_badvaddr);
1645 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1646 er = EMULATE_FAIL;
1647 break;
1648 }
1649
1650 run->mmio.len = bytes;
1651 run->mmio.is_write = 0;
1652 vcpu->mmio_needed = 1;
1653 vcpu->mmio_is_write = 0;
1654
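		/*
		 * mmio_needed doubles as a flag for the MMIO completion path:
		 * 2 marks a signed load (lh/lb) that must be sign-extended,
		 * 1 an unsigned or plain load.
		 */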
1655 if (op == lh_op)
1656 vcpu->mmio_needed = 2;
1657 else
1658 vcpu->mmio_needed = 1;
1659
1660 break;
1661
1662 case lbu_op:
1663 case lb_op:
1664 bytes = 1;
1665 if (bytes > sizeof(run->mmio.data)) {
1666 kvm_err("%s: bad MMIO length: %d\n", __func__,
1667 run->mmio.len);
1668 er = EMULATE_FAIL;
1669 break;
1670 }
1671 run->mmio.phys_addr =
1672 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1673 host_cp0_badvaddr);
1674 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1675 er = EMULATE_FAIL;
1676 break;
1677 }
1678
1679 run->mmio.len = bytes;
1680 run->mmio.is_write = 0;
1681 vcpu->mmio_is_write = 0;
1682
1683 if (op == lb_op)
1684 vcpu->mmio_needed = 2;
1685 else
1686 vcpu->mmio_needed = 1;
1687
1688 break;
1689
1690 default:
d86c1ebe 1691 kvm_err("Load not yet supported (inst=0x%08x)\n",
258f3a2e 1692 inst.word);
e685c689
SL
1693 er = EMULATE_FAIL;
1694 break;
1695 }
1696
1697 return er;
1698}
1699
258f3a2e
JH
1700enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
1701 u32 *opc, u32 cause,
d116e812
DCZ
1702 struct kvm_run *run,
1703 struct kvm_vcpu *vcpu)
e685c689
SL
1704{
1705 struct mips_coproc *cop0 = vcpu->arch.cop0;
e685c689 1706 enum emulation_result er = EMULATE_DONE;
8cffd197
JH
1707 u32 cache, op_inst, op, base;
1708 s16 offset;
e685c689
SL
1709 struct kvm_vcpu_arch *arch = &vcpu->arch;
1710 unsigned long va;
1711 unsigned long curr_pc;
1712
1713 /*
1714 * Update PC and hold onto current PC in case there is
1715 * an error and we want to rollback the PC
1716 */
1717 curr_pc = vcpu->arch.pc;
1718 er = update_pc(vcpu, cause);
1719 if (er == EMULATE_FAIL)
1720 return er;
1721
258f3a2e
JH
1722 base = inst.i_format.rs;
1723 op_inst = inst.i_format.rt;
5cc4aafc
JH
1724 if (cpu_has_mips_r6)
1725 offset = inst.spec3_format.simmediate;
1726 else
1727 offset = inst.i_format.simmediate;
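	/*
	 * MIPS r6 moved CACHE into the SPECIAL3 opcode space with a 9-bit
	 * signed offset, hence the different immediate field decoding above.
	 */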
f4956f62
JH
1728 cache = op_inst & CacheOp_Cache;
1729 op = op_inst & CacheOp_Op;
e685c689
SL
1730
1731 va = arch->gprs[base] + offset;
1732
1733 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1734 cache, op, base, arch->gprs[base], offset);
1735
d116e812
DCZ
1736 /*
1737 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
1738 * invalidate the caches entirely by stepping through all the
1739 * ways/indexes
e685c689 1740 */
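	/*
	 * Rather than model individual ways/indexes, blast the whole host
	 * cache of the requested type; coarser than the guest asked for, but
	 * always safe.
	 */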
f4956f62 1741 if (op == Index_Writeback_Inv) {
d116e812
DCZ
1742 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1743 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1744 arch->gprs[base], offset);
e685c689 1745
f4956f62 1746 if (cache == Cache_D)
e685c689 1747 r4k_blast_dcache();
f4956f62 1748 else if (cache == Cache_I)
e685c689
SL
1749 r4k_blast_icache();
1750 else {
6ad78a5c
DCZ
1751 kvm_err("%s: unsupported CACHE INDEX operation\n",
1752 __func__);
e685c689
SL
1753 return EMULATE_FAIL;
1754 }
1755
1756#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1757 kvm_mips_trans_cache_index(inst, opc, vcpu);
1758#endif
1759 goto done;
1760 }
1761
1762 preempt_disable();
1763 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
9b731bcf
JH
1764 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
1765 kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
1766 kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
1767 __func__, va, vcpu, read_c0_entryhi());
1768 er = EMULATE_FAIL;
1769 preempt_enable();
1770 goto done;
1771 }
e685c689
SL
1772 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1773 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1774 int index;
1775
1776 /* If an entry already exists then skip */
d116e812 1777 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
e685c689 1778 goto skip_fault;
e685c689 1779
d116e812
DCZ
1780 /*
1781 * If address not in the guest TLB, then give the guest a fault,
1782 * the resulting handler will do the right thing
e685c689
SL
1783 */
1784 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
48c4ac97 1785 (kvm_read_c0_guest_entryhi
ca64c2be 1786 (cop0) & KVM_ENTRYHI_ASID));
e685c689
SL
1787
1788 if (index < 0) {
e685c689 1789 vcpu->arch.host_cp0_badvaddr = va;
6df82a7b 1790 vcpu->arch.pc = curr_pc;
e685c689
SL
1791 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1792 vcpu);
1793 preempt_enable();
1794 goto dont_update_pc;
1795 } else {
1796 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
d116e812
DCZ
1797 /*
1798 * Check if the entry is valid, if not then setup a TLB
1799 * invalid exception to the guest
1800 */
e685c689 1801 if (!TLB_IS_VALID(*tlb, va)) {
6df82a7b
JH
1802 vcpu->arch.host_cp0_badvaddr = va;
1803 vcpu->arch.pc = curr_pc;
e685c689
SL
1804 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1805 run, vcpu);
1806 preempt_enable();
1807 goto dont_update_pc;
9b731bcf
JH
1808 }
1809 /*
1810 * We fault the entry from the guest TLB into the
1811 * shadow host TLB
1812 */
7e3d2a75
JH
1813 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1814 va)) {
9b731bcf
JH
1815 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
1816 __func__, va, index, vcpu,
1817 read_c0_entryhi());
1818 er = EMULATE_FAIL;
1819 preempt_enable();
1820 goto done;
e685c689
SL
1821 }
1822 }
1823 } else {
6ad78a5c
DCZ
1824 kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1825 cache, op, base, arch->gprs[base], offset);
e685c689
SL
1826 er = EMULATE_FAIL;
1827 preempt_enable();
cc81e948 1828 goto done;
e685c689
SL
1829
1830 }
1831
1832skip_fault:
1833 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
f4956f62 1834 if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
8af0e3c2 1835 protected_writeback_dcache_line(va);
e685c689
SL
1836
1837#ifdef CONFIG_KVM_MIPS_DYN_TRANS
d116e812
DCZ
1838 /*
1839 * Replace the CACHE instruction with a SYNCI; not the same,
1840 * but it avoids a trap
1841 */
e685c689
SL
1842 kvm_mips_trans_cache_va(inst, opc, vcpu);
1843#endif
f4956f62 1844 } else if (op_inst == Hit_Invalidate_I) {
8af0e3c2
JH
1845 protected_writeback_dcache_line(va);
1846 protected_flush_icache_line(va);
e685c689
SL
1847
1848#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1849 /* Replace the CACHE instruction, with a SYNCI */
1850 kvm_mips_trans_cache_va(inst, opc, vcpu);
1851#endif
1852 } else {
6ad78a5c
DCZ
1853 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1854 cache, op, base, arch->gprs[base], offset);
e685c689 1855 er = EMULATE_FAIL;
e685c689
SL
1856 }
1857
1858 preempt_enable();
cc81e948
JH
1859done:
1860 /* Rollback PC only if emulation was unsuccessful */
1861 if (er == EMULATE_FAIL)
1862 vcpu->arch.pc = curr_pc;
e685c689 1863
d116e812 1864dont_update_pc:
cc81e948
JH
1865 /*
1866 * This is for exceptions whose emulation updates the PC, so do not
1867 * overwrite the PC under any circumstances
1868 */
1869
e685c689
SL
1870 return er;
1871}
1872
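
The rt field of a CACHE instruction encodes both the target cache and the operation, which is why op_inst above is split with the CacheOp_Cache and CacheOp_Op masks. A rough user-space sketch of that decode, assuming the usual MIPS layout (bits 1:0 select the cache, bits 4:2 the operation); the constants here are illustrative rather than taken from asm/cacheops.h:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t op_inst = 0x15;		/* e.g. Hit Writeback Invalidate, D-cache */
	uint32_t cache = op_inst & 0x03;	/* cache selector: 0=I, 1=D, 2=T, 3=S */
	uint32_t op = op_inst & 0x1c;		/* operation bits */

	printf("cache %u, op %#x\n", (unsigned int)cache, (unsigned int)op);
	return 0;
}
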
31cf7498 1873enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
d116e812
DCZ
1874 struct kvm_run *run,
1875 struct kvm_vcpu *vcpu)
e685c689 1876{
258f3a2e 1877 union mips_instruction inst;
e685c689 1878 enum emulation_result er = EMULATE_DONE;
122e51d4 1879 int err;
e685c689 1880
d116e812
DCZ
1881 /* Fetch the instruction. */
1882 if (cause & CAUSEF_BD)
e685c689 1883 opc += 1;
6a97c775 1884 err = kvm_get_badinstr(opc, vcpu, &inst.word);
122e51d4
JH
1885 if (err)
1886 return EMULATE_FAIL;
e685c689 1887
258f3a2e 1888 switch (inst.r_format.opcode) {
e685c689
SL
1889 case cop0_op:
1890 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1891 break;
1892 case sb_op:
1893 case sh_op:
1894 case sw_op:
1895 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1896 break;
1897 case lb_op:
1898 case lbu_op:
1899 case lhu_op:
1900 case lh_op:
1901 case lw_op:
1902 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1903 break;
1904
5cc4aafc 1905#ifndef CONFIG_CPU_MIPSR6
e685c689
SL
1906 case cache_op:
1907 ++vcpu->stat.cache_exits;
1e09e86a 1908 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
e685c689
SL
1909 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1910 break;
5cc4aafc
JH
1911#else
1912 case spec3_op:
1913 switch (inst.spec3_format.func) {
1914 case cache6_op:
1915 ++vcpu->stat.cache_exits;
1916 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1917 er = kvm_mips_emulate_cache(inst, opc, cause, run,
1918 vcpu);
1919 break;
1920 default:
1921 goto unknown;
1922 }
1923 break;
1924unknown:
1925#endif
e685c689
SL
1926
1927 default:
6ad78a5c 1928 kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
258f3a2e 1929 inst.word);
e685c689
SL
1930 kvm_arch_vcpu_dump_regs(vcpu);
1931 er = EMULATE_FAIL;
1932 break;
1933 }
1934
1935 return er;
1936}
1937
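
kvm_mips_emulate_inst() dispatches on the primary opcode in bits 31:26 of the fetched word and, for the r6 CACHE form, on the spec3 function field in bits 5:0. A small illustrative sketch of those field extractions, independent of the kernel's mips_instruction union:

#include <stdint.h>

/* Primary MIPS opcode: bits 31:26 of the instruction word. */
static inline uint32_t mips_primary_opcode(uint32_t word)
{
	return word >> 26;
}

/* Function field of an R-type/spec3 instruction: bits 5:0. */
static inline uint32_t mips_func_field(uint32_t word)
{
	return word & 0x3f;
}
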
31cf7498 1938enum emulation_result kvm_mips_emulate_syscall(u32 cause,
bdb7ed86 1939 u32 *opc,
d116e812
DCZ
1940 struct kvm_run *run,
1941 struct kvm_vcpu *vcpu)
e685c689
SL
1942{
1943 struct mips_coproc *cop0 = vcpu->arch.cop0;
1944 struct kvm_vcpu_arch *arch = &vcpu->arch;
1945 enum emulation_result er = EMULATE_DONE;
1946
1947 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1948 /* save old pc */
1949 kvm_write_c0_guest_epc(cop0, arch->pc);
1950 kvm_set_c0_guest_status(cop0, ST0_EXL);
1951
1952 if (cause & CAUSEF_BD)
1953 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1954 else
1955 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1956
1957 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1958
1959 kvm_change_c0_guest_cause(cop0, (0xff),
16d100db 1960 (EXCCODE_SYS << CAUSEB_EXCCODE));
e685c689
SL
1961
1962 /* Set PC to the exception entry point */
1963 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1964
1965 } else {
6ad78a5c 1966 kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
e685c689
SL
1967 er = EMULATE_FAIL;
1968 }
1969
1970 return er;
1971}
1972
31cf7498 1973enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
bdb7ed86 1974 u32 *opc,
d116e812
DCZ
1975 struct kvm_run *run,
1976 struct kvm_vcpu *vcpu)
e685c689
SL
1977{
1978 struct mips_coproc *cop0 = vcpu->arch.cop0;
1979 struct kvm_vcpu_arch *arch = &vcpu->arch;
e685c689 1980 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
ca64c2be 1981 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
e685c689
SL
1982
1983 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1984 /* save old pc */
1985 kvm_write_c0_guest_epc(cop0, arch->pc);
1986 kvm_set_c0_guest_status(cop0, ST0_EXL);
1987
1988 if (cause & CAUSEF_BD)
1989 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1990 else
1991 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1992
1993 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1994 arch->pc);
1995
1996 /* set pc to the exception entry point */
1997 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1998
1999 } else {
2000 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
2001 arch->pc);
2002
2003 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2004 }
2005
2006 kvm_change_c0_guest_cause(cop0, (0xff),
16d100db 2007 (EXCCODE_TLBL << CAUSEB_EXCCODE));
e685c689
SL
2008
2009 /* setup badvaddr, context and entryhi registers for the guest */
2010 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2011 /* XXXKYMA: is the context register used by linux??? */
2012 kvm_write_c0_guest_entryhi(cop0, entryhi);
e685c689 2013
d98403a5 2014 return EMULATE_DONE;
e685c689
SL
2015}
2016
31cf7498 2017enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
bdb7ed86 2018 u32 *opc,
d116e812
DCZ
2019 struct kvm_run *run,
2020 struct kvm_vcpu *vcpu)
e685c689
SL
2021{
2022 struct mips_coproc *cop0 = vcpu->arch.cop0;
2023 struct kvm_vcpu_arch *arch = &vcpu->arch;
e685c689
SL
2024 unsigned long entryhi =
2025 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
ca64c2be 2026 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
e685c689
SL
2027
2028 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2029 /* save old pc */
2030 kvm_write_c0_guest_epc(cop0, arch->pc);
2031 kvm_set_c0_guest_status(cop0, ST0_EXL);
2032
2033 if (cause & CAUSEF_BD)
2034 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2035 else
2036 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2037
2038 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
2039 arch->pc);
2040
2041 /* set pc to the exception entry point */
2042 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2043
2044 } else {
2045 kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
2046 arch->pc);
2047 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2048 }
2049
2050 kvm_change_c0_guest_cause(cop0, (0xff),
16d100db 2051 (EXCCODE_TLBL << CAUSEB_EXCCODE));
e685c689
SL
2052
2053 /* setup badvaddr, context and entryhi registers for the guest */
2054 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2055 /* XXXKYMA: is the context register used by linux??? */
2056 kvm_write_c0_guest_entryhi(cop0, entryhi);
e685c689 2057
d98403a5 2058 return EMULATE_DONE;
e685c689
SL
2059}
2060
31cf7498 2061enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
bdb7ed86 2062 u32 *opc,
d116e812
DCZ
2063 struct kvm_run *run,
2064 struct kvm_vcpu *vcpu)
e685c689
SL
2065{
2066 struct mips_coproc *cop0 = vcpu->arch.cop0;
2067 struct kvm_vcpu_arch *arch = &vcpu->arch;
e685c689 2068 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
ca64c2be 2069 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
e685c689
SL
2070
2071 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2072 /* save old pc */
2073 kvm_write_c0_guest_epc(cop0, arch->pc);
2074 kvm_set_c0_guest_status(cop0, ST0_EXL);
2075
2076 if (cause & CAUSEF_BD)
2077 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2078 else
2079 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2080
2081 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
2082 arch->pc);
2083
2084 /* Set PC to the exception entry point */
2085 arch->pc = KVM_GUEST_KSEG0 + 0x0;
2086 } else {
2087 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
2088 arch->pc);
2089 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2090 }
2091
2092 kvm_change_c0_guest_cause(cop0, (0xff),
16d100db 2093 (EXCCODE_TLBS << CAUSEB_EXCCODE));
e685c689
SL
2094
2095 /* setup badvaddr, context and entryhi registers for the guest */
2096 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2097 /* XXXKYMA: is the context register used by linux??? */
2098 kvm_write_c0_guest_entryhi(cop0, entryhi);
e685c689 2099
d98403a5 2100 return EMULATE_DONE;
e685c689
SL
2101}
2102
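
The TLB miss and TLB invalid handlers above all follow the same vectoring rule: a refill taken with EXL clear goes to the special vector at offset 0x0, while everything else, including a refill with EXL already set, uses the general vector at offset 0x180. A compact sketch of that rule, using a hypothetical helper name:

/* Hypothetical helper mirroring the PC selection in the handlers above. */
static unsigned long guest_exception_vector(unsigned long base,
					    int tlb_refill, int exl_was_set)
{
	if (tlb_refill && !exl_was_set)
		return base + 0x000;	/* dedicated TLB refill vector */
	return base + 0x180;		/* general exception vector */
}
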
31cf7498 2103enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
bdb7ed86 2104 u32 *opc,
d116e812
DCZ
2105 struct kvm_run *run,
2106 struct kvm_vcpu *vcpu)
e685c689
SL
2107{
2108 struct mips_coproc *cop0 = vcpu->arch.cop0;
2109 struct kvm_vcpu_arch *arch = &vcpu->arch;
e685c689 2110 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
ca64c2be 2111 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
e685c689
SL
2112
2113 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2114 /* save old pc */
2115 kvm_write_c0_guest_epc(cop0, arch->pc);
2116 kvm_set_c0_guest_status(cop0, ST0_EXL);
2117
2118 if (cause & CAUSEF_BD)
2119 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2120 else
2121 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2122
2123 kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
2124 arch->pc);
2125
2126 /* Set PC to the exception entry point */
2127 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2128 } else {
2129 kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
2130 arch->pc);
2131 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2132 }
2133
2134 kvm_change_c0_guest_cause(cop0, (0xff),
16d100db 2135 (EXCCODE_TLBS << CAUSEB_EXCCODE));
e685c689
SL
2136
2137 /* setup badvaddr, context and entryhi registers for the guest */
2138 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2139 /* XXXKYMA: is the context register used by linux??? */
2140 kvm_write_c0_guest_entryhi(cop0, entryhi);
e685c689 2141
d98403a5 2142 return EMULATE_DONE;
e685c689
SL
2143}
2144
2145/* TLBMOD: store into address matching TLB with Dirty bit off */
31cf7498 2146enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
d116e812
DCZ
2147 struct kvm_run *run,
2148 struct kvm_vcpu *vcpu)
e685c689
SL
2149{
2150 enum emulation_result er = EMULATE_DONE;
e685c689 2151#ifdef DEBUG
3d654833
JH
2152 struct mips_coproc *cop0 = vcpu->arch.cop0;
2153 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
ca64c2be 2154 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
57e3869c 2155 bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);
3d654833
JH
2156 int index;
2157
d116e812 2158 /* If the address is not in the guest TLB, then we are in trouble */
e685c689
SL
2159 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
2160 if (index < 0) {
2161 /* XXXKYMA Invalidate and retry */
57e3869c
JH
2162 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr,
2163 !kernel, kernel);
e685c689
SL
2164 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
2165 __func__, entryhi);
2166 kvm_mips_dump_guest_tlbs(vcpu);
2167 kvm_mips_dump_host_tlbs();
2168 return EMULATE_FAIL;
2169 }
2170#endif
2171
2172 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
2173 return er;
2174}
2175
31cf7498 2176enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
bdb7ed86 2177 u32 *opc,
d116e812
DCZ
2178 struct kvm_run *run,
2179 struct kvm_vcpu *vcpu)
e685c689
SL
2180{
2181 struct mips_coproc *cop0 = vcpu->arch.cop0;
2182 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
ca64c2be 2183 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
e685c689 2184 struct kvm_vcpu_arch *arch = &vcpu->arch;
e685c689
SL
2185
2186 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2187 /* save old pc */
2188 kvm_write_c0_guest_epc(cop0, arch->pc);
2189 kvm_set_c0_guest_status(cop0, ST0_EXL);
2190
2191 if (cause & CAUSEF_BD)
2192 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2193 else
2194 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2195
2196 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2197 arch->pc);
2198
2199 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2200 } else {
2201 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2202 arch->pc);
2203 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2204 }
2205
16d100db
JH
2206 kvm_change_c0_guest_cause(cop0, (0xff),
2207 (EXCCODE_MOD << CAUSEB_EXCCODE));
e685c689
SL
2208
2209 /* setup badvaddr, context and entryhi registers for the guest */
2210 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2211 /* XXXKYMA: is the context register used by linux??? */
2212 kvm_write_c0_guest_entryhi(cop0, entryhi);
e685c689 2213
d98403a5 2214 return EMULATE_DONE;
e685c689
SL
2215}
2216
31cf7498 2217enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
bdb7ed86 2218 u32 *opc,
d116e812
DCZ
2219 struct kvm_run *run,
2220 struct kvm_vcpu *vcpu)
e685c689
SL
2221{
2222 struct mips_coproc *cop0 = vcpu->arch.cop0;
2223 struct kvm_vcpu_arch *arch = &vcpu->arch;
e685c689
SL
2224
2225 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2226 /* save old pc */
2227 kvm_write_c0_guest_epc(cop0, arch->pc);
2228 kvm_set_c0_guest_status(cop0, ST0_EXL);
2229
2230 if (cause & CAUSEF_BD)
2231 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2232 else
2233 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2234
2235 }
2236
2237 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2238
2239 kvm_change_c0_guest_cause(cop0, (0xff),
16d100db 2240 (EXCCODE_CPU << CAUSEB_EXCCODE));
e685c689
SL
2241 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2242
d98403a5 2243 return EMULATE_DONE;
e685c689
SL
2244}
2245
31cf7498 2246enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
bdb7ed86 2247 u32 *opc,
d116e812
DCZ
2248 struct kvm_run *run,
2249 struct kvm_vcpu *vcpu)
e685c689
SL
2250{
2251 struct mips_coproc *cop0 = vcpu->arch.cop0;
2252 struct kvm_vcpu_arch *arch = &vcpu->arch;
2253 enum emulation_result er = EMULATE_DONE;
2254
2255 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2256 /* save old pc */
2257 kvm_write_c0_guest_epc(cop0, arch->pc);
2258 kvm_set_c0_guest_status(cop0, ST0_EXL);
2259
2260 if (cause & CAUSEF_BD)
2261 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2262 else
2263 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2264
2265 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2266
2267 kvm_change_c0_guest_cause(cop0, (0xff),
16d100db 2268 (EXCCODE_RI << CAUSEB_EXCCODE));
e685c689
SL
2269
2270 /* Set PC to the exception entry point */
2271 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2272
2273 } else {
2274 kvm_err("Trying to deliver RI when EXL is already set\n");
2275 er = EMULATE_FAIL;
2276 }
2277
2278 return er;
2279}
2280
31cf7498 2281enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
bdb7ed86 2282 u32 *opc,
d116e812
DCZ
2283 struct kvm_run *run,
2284 struct kvm_vcpu *vcpu)
e685c689
SL
2285{
2286 struct mips_coproc *cop0 = vcpu->arch.cop0;
2287 struct kvm_vcpu_arch *arch = &vcpu->arch;
2288 enum emulation_result er = EMULATE_DONE;
2289
2290 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2291 /* save old pc */
2292 kvm_write_c0_guest_epc(cop0, arch->pc);
2293 kvm_set_c0_guest_status(cop0, ST0_EXL);
2294
2295 if (cause & CAUSEF_BD)
2296 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2297 else
2298 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2299
2300 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2301
2302 kvm_change_c0_guest_cause(cop0, (0xff),
16d100db 2303 (EXCCODE_BP << CAUSEB_EXCCODE));
e685c689
SL
2304
2305 /* Set PC to the exception entry point */
2306 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2307
2308 } else {
6ad78a5c 2309 kvm_err("Trying to deliver BP when EXL is already set\n");
e685c689
SL
2310 er = EMULATE_FAIL;
2311 }
2312
2313 return er;
2314}
2315
31cf7498 2316enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
bdb7ed86 2317 u32 *opc,
0a560427
JH
2318 struct kvm_run *run,
2319 struct kvm_vcpu *vcpu)
2320{
2321 struct mips_coproc *cop0 = vcpu->arch.cop0;
2322 struct kvm_vcpu_arch *arch = &vcpu->arch;
2323 enum emulation_result er = EMULATE_DONE;
2324
2325 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2326 /* save old pc */
2327 kvm_write_c0_guest_epc(cop0, arch->pc);
2328 kvm_set_c0_guest_status(cop0, ST0_EXL);
2329
2330 if (cause & CAUSEF_BD)
2331 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2332 else
2333 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2334
2335 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2336
2337 kvm_change_c0_guest_cause(cop0, (0xff),
16d100db 2338 (EXCCODE_TR << CAUSEB_EXCCODE));
0a560427
JH
2339
2340 /* Set PC to the exception entry point */
2341 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2342
2343 } else {
2344 kvm_err("Trying to deliver TRAP when EXL is already set\n");
2345 er = EMULATE_FAIL;
2346 }
2347
2348 return er;
2349}
2350
31cf7498 2351enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
bdb7ed86 2352 u32 *opc,
c2537ed9
JH
2353 struct kvm_run *run,
2354 struct kvm_vcpu *vcpu)
2355{
2356 struct mips_coproc *cop0 = vcpu->arch.cop0;
2357 struct kvm_vcpu_arch *arch = &vcpu->arch;
2358 enum emulation_result er = EMULATE_DONE;
2359
2360 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2361 /* save old pc */
2362 kvm_write_c0_guest_epc(cop0, arch->pc);
2363 kvm_set_c0_guest_status(cop0, ST0_EXL);
2364
2365 if (cause & CAUSEF_BD)
2366 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2367 else
2368 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2369
2370 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2371
2372 kvm_change_c0_guest_cause(cop0, (0xff),
16d100db 2373 (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
c2537ed9
JH
2374
2375 /* Set PC to the exception entry point */
2376 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2377
2378 } else {
2379 kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2380 er = EMULATE_FAIL;
2381 }
2382
2383 return er;
2384}
2385
31cf7498 2386enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
bdb7ed86 2387 u32 *opc,
1c0cd66a
JH
2388 struct kvm_run *run,
2389 struct kvm_vcpu *vcpu)
2390{
2391 struct mips_coproc *cop0 = vcpu->arch.cop0;
2392 struct kvm_vcpu_arch *arch = &vcpu->arch;
2393 enum emulation_result er = EMULATE_DONE;
2394
2395 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2396 /* save old pc */
2397 kvm_write_c0_guest_epc(cop0, arch->pc);
2398 kvm_set_c0_guest_status(cop0, ST0_EXL);
2399
2400 if (cause & CAUSEF_BD)
2401 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2402 else
2403 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2404
2405 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2406
2407 kvm_change_c0_guest_cause(cop0, (0xff),
16d100db 2408 (EXCCODE_FPE << CAUSEB_EXCCODE));
1c0cd66a
JH
2409
2410 /* Set PC to the exception entry point */
2411 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2412
2413 } else {
2414 kvm_err("Trying to deliver FPE when EXL is already set\n");
2415 er = EMULATE_FAIL;
2416 }
2417
2418 return er;
2419}
2420
31cf7498 2421enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
bdb7ed86 2422 u32 *opc,
c2537ed9
JH
2423 struct kvm_run *run,
2424 struct kvm_vcpu *vcpu)
2425{
2426 struct mips_coproc *cop0 = vcpu->arch.cop0;
2427 struct kvm_vcpu_arch *arch = &vcpu->arch;
2428 enum emulation_result er = EMULATE_DONE;
2429
2430 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2431 /* save old pc */
2432 kvm_write_c0_guest_epc(cop0, arch->pc);
2433 kvm_set_c0_guest_status(cop0, ST0_EXL);
2434
2435 if (cause & CAUSEF_BD)
2436 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2437 else
2438 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2439
2440 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
2441
2442 kvm_change_c0_guest_cause(cop0, (0xff),
16d100db 2443 (EXCCODE_MSADIS << CAUSEB_EXCCODE));
c2537ed9
JH
2444
2445 /* Set PC to the exception entry point */
2446 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2447
2448 } else {
2449 kvm_err("Trying to deliver MSADIS when EXL is already set\n");
2450 er = EMULATE_FAIL;
2451 }
2452
2453 return er;
2454}
2455
31cf7498 2456enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
d116e812
DCZ
2457 struct kvm_run *run,
2458 struct kvm_vcpu *vcpu)
e685c689
SL
2459{
2460 struct mips_coproc *cop0 = vcpu->arch.cop0;
2461 struct kvm_vcpu_arch *arch = &vcpu->arch;
2462 enum emulation_result er = EMULATE_DONE;
2463 unsigned long curr_pc;
258f3a2e 2464 union mips_instruction inst;
122e51d4 2465 int err;
e685c689
SL
2466
2467 /*
2468 * Update PC and hold onto current PC in case there is
2469 * an error and we want to roll back the PC
2470 */
2471 curr_pc = vcpu->arch.pc;
2472 er = update_pc(vcpu, cause);
2473 if (er == EMULATE_FAIL)
2474 return er;
2475
d116e812 2476 /* Fetch the instruction. */
e685c689
SL
2477 if (cause & CAUSEF_BD)
2478 opc += 1;
6a97c775 2479 err = kvm_get_badinstr(opc, vcpu, &inst.word);
122e51d4
JH
2480 if (err) {
2481 kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err);
e685c689
SL
2482 return EMULATE_FAIL;
2483 }
2484
258f3a2e 2485 if (inst.r_format.opcode == spec3_op &&
8eeab81c
JH
2486 inst.r_format.func == rdhwr_op &&
2487 inst.r_format.rs == 0 &&
2488 (inst.r_format.re >> 3) == 0) {
26f4f3b5 2489 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
258f3a2e
JH
2490 int rd = inst.r_format.rd;
2491 int rt = inst.r_format.rt;
2492 int sel = inst.r_format.re & 0x7;
6398da13 2493
26f4f3b5
JH
2494 /* If usermode, check RDHWR rd is allowed by guest HWREna */
2495 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2496 kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2497 rd, opc);
2498 goto emulate_ri;
2499 }
e685c689 2500 switch (rd) {
aff565aa 2501 case MIPS_HWR_CPUNUM: /* CPU number */
cf1fb0f2 2502 arch->gprs[rt] = vcpu->vcpu_id;
e685c689 2503 break;
aff565aa 2504 case MIPS_HWR_SYNCISTEP: /* SYNCI length */
e685c689
SL
2505 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2506 current_cpu_data.icache.linesz);
2507 break;
aff565aa 2508 case MIPS_HWR_CC: /* Read count register */
172e02d1 2509 arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
e685c689 2510 break;
aff565aa 2511 case MIPS_HWR_CCRES: /* Count register resolution */
e685c689
SL
2512 switch (current_cpu_data.cputype) {
2513 case CPU_20KC:
2514 case CPU_25KF:
2515 arch->gprs[rt] = 1;
2516 break;
2517 default:
2518 arch->gprs[rt] = 2;
2519 }
2520 break;
aff565aa 2521 case MIPS_HWR_ULR: /* Read UserLocal register */
e685c689 2522 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
e685c689
SL
2523 break;
2524
2525 default:
15505679 2526 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
26f4f3b5 2527 goto emulate_ri;
e685c689 2528 }
6398da13
JH
2529
2530 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
2531 vcpu->arch.gprs[rt]);
e685c689 2532 } else {
258f3a2e
JH
2533 kvm_debug("Emulate RI not supported @ %p: %#x\n",
2534 opc, inst.word);
26f4f3b5 2535 goto emulate_ri;
e685c689
SL
2536 }
2537
26f4f3b5
JH
2538 return EMULATE_DONE;
2539
2540emulate_ri:
e685c689 2541 /*
26f4f3b5
JH
2542 * Rollback PC (if in branch delay slot then the PC already points to
2543 * branch target), and pass the RI exception to the guest OS.
e685c689 2544 */
26f4f3b5
JH
2545 vcpu->arch.pc = curr_pc;
2546 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
e685c689
SL
2547}
2548
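
For a user-mode guest, RDHWR is only permitted when the corresponding bit of the guest's HWREna register is set, which is what the BIT(rd) test above implements. A minimal sketch of the same check, with a hypothetical helper name:

#include <stdint.h>

/* Non-zero if RDHWR of hardware register 'rd' is allowed in user mode. */
static inline int rdhwr_allowed(uint32_t hwrena, unsigned int rd)
{
	return (hwrena >> rd) & 1;
}
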
d116e812
DCZ
2549enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2550 struct kvm_run *run)
e685c689
SL
2551{
2552 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2553 enum emulation_result er = EMULATE_DONE;
e685c689
SL
2554
2555 if (run->mmio.len > sizeof(*gpr)) {
6ad78a5c 2556 kvm_err("Bad MMIO length: %d", run->mmio.len);
e685c689
SL
2557 er = EMULATE_FAIL;
2558 goto done;
2559 }
2560
e1e575f6
JH
2561 /* Restore saved resume PC */
2562 vcpu->arch.pc = vcpu->arch.io_pc;
e685c689
SL
2563
2564 switch (run->mmio.len) {
2565 case 4:
8cffd197 2566 *gpr = *(s32 *) run->mmio.data;
e685c689
SL
2567 break;
2568
2569 case 2:
2570 if (vcpu->mmio_needed == 2)
8cffd197 2571 *gpr = *(s16 *) run->mmio.data;
e685c689 2572 else
8cffd197 2573 *gpr = *(u16 *)run->mmio.data;
e685c689
SL
2574
2575 break;
2576 case 1:
2577 if (vcpu->mmio_needed == 2)
8cffd197 2578 *gpr = *(s8 *) run->mmio.data;
e685c689
SL
2579 else
2580 *gpr = *(u8 *) run->mmio.data;
2581 break;
2582 }
2583
e685c689
SL
2584done:
2585 return er;
2586}
2587
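
Completing an MMIO load widens the device data to register width: in the switch above, vcpu->mmio_needed == 2 marks a signed load, so byte and halfword data are sign-extended when it is set and zero-extended otherwise, while word data are always read as signed. A standalone sketch of that widening, using a hypothetical helper name:

#include <stdint.h>

static long widen_mmio_value(const void *data, unsigned int len, int is_signed)
{
	switch (len) {
	case 4:
		return *(const int32_t *)data;		/* words: sign-extend, as above */
	case 2:
		return is_signed ? (long)*(const int16_t *)data
				 : (long)*(const uint16_t *)data;
	case 1:
		return is_signed ? (long)*(const int8_t *)data
				 : (long)*(const uint8_t *)data;
	}
	return 0;					/* unsupported length */
}
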
31cf7498 2588static enum emulation_result kvm_mips_emulate_exc(u32 cause,
bdb7ed86 2589 u32 *opc,
d116e812
DCZ
2590 struct kvm_run *run,
2591 struct kvm_vcpu *vcpu)
e685c689 2592{
8cffd197 2593 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
e685c689
SL
2594 struct mips_coproc *cop0 = vcpu->arch.cop0;
2595 struct kvm_vcpu_arch *arch = &vcpu->arch;
2596 enum emulation_result er = EMULATE_DONE;
2597
2598 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2599 /* save old pc */
2600 kvm_write_c0_guest_epc(cop0, arch->pc);
2601 kvm_set_c0_guest_status(cop0, ST0_EXL);
2602
2603 if (cause & CAUSEF_BD)
2604 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2605 else
2606 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2607
2608 kvm_change_c0_guest_cause(cop0, (0xff),
2609 (exccode << CAUSEB_EXCCODE));
2610
2611 /* Set PC to the exception entry point */
2612 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2613 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2614
2615 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2616 exccode, kvm_read_c0_guest_epc(cop0),
2617 kvm_read_c0_guest_badvaddr(cop0));
2618 } else {
6ad78a5c 2619 kvm_err("Trying to deliver EXC when EXL is already set\n");
e685c689
SL
2620 er = EMULATE_FAIL;
2621 }
2622
2623 return er;
2624}
2625
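
Both kvm_mips_emulate_exc() above and kvm_mips_check_privilege() below derive the exception code from CP0_Cause; ExcCode is the five-bit field in bits 6:2, which is what (cause >> CAUSEB_EXCCODE) & 0x1f extracts (assuming the architectural CAUSEB_EXCCODE value of 2). A tiny illustrative helper:

#include <stdint.h>

/* Cause.ExcCode occupies bits 6:2 of the CP0 Cause register. */
static inline unsigned int cause_exccode(uint32_t cause)
{
	return (cause >> 2) & 0x1f;
}
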
31cf7498 2626enum emulation_result kvm_mips_check_privilege(u32 cause,
bdb7ed86 2627 u32 *opc,
d116e812
DCZ
2628 struct kvm_run *run,
2629 struct kvm_vcpu *vcpu)
e685c689
SL
2630{
2631 enum emulation_result er = EMULATE_DONE;
8cffd197 2632 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
e685c689
SL
2633 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2634
2635 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2636
2637 if (usermode) {
2638 switch (exccode) {
16d100db
JH
2639 case EXCCODE_INT:
2640 case EXCCODE_SYS:
2641 case EXCCODE_BP:
2642 case EXCCODE_RI:
2643 case EXCCODE_TR:
2644 case EXCCODE_MSAFPE:
2645 case EXCCODE_FPE:
2646 case EXCCODE_MSADIS:
e685c689
SL
2647 break;
2648
16d100db 2649 case EXCCODE_CPU:
e685c689
SL
2650 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2651 er = EMULATE_PRIV_FAIL;
2652 break;
2653
16d100db 2654 case EXCCODE_MOD:
e685c689
SL
2655 break;
2656
16d100db 2657 case EXCCODE_TLBL:
d116e812
DCZ
2658 /*
2659 * If we are accessing Guest kernel space, then send an
2660 * address error exception to the guest
2661 */
e685c689 2662 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
6ad78a5c
DCZ
2663 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2664 badvaddr);
e685c689 2665 cause &= ~0xff;
16d100db 2666 cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
e685c689
SL
2667 er = EMULATE_PRIV_FAIL;
2668 }
2669 break;
2670
16d100db 2671 case EXCCODE_TLBS:
d116e812
DCZ
2672 /*
2673 * If we are accessing Guest kernel space, then send an
2674 * address error exception to the guest
2675 */
e685c689 2676 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
6ad78a5c
DCZ
2677 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2678 badvaddr);
e685c689 2679 cause &= ~0xff;
16d100db 2680 cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
e685c689
SL
2681 er = EMULATE_PRIV_FAIL;
2682 }
2683 break;
2684
16d100db 2685 case EXCCODE_ADES:
6ad78a5c
DCZ
2686 kvm_debug("%s: address error ST @ %#lx\n", __func__,
2687 badvaddr);
e685c689
SL
2688 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2689 cause &= ~0xff;
16d100db 2690 cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
e685c689
SL
2691 }
2692 er = EMULATE_PRIV_FAIL;
2693 break;
16d100db 2694 case EXCCODE_ADEL:
6ad78a5c
DCZ
2695 kvm_debug("%s: address error LD @ %#lx\n", __func__,
2696 badvaddr);
e685c689
SL
2697 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2698 cause &= ~0xff;
16d100db 2699 cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
e685c689
SL
2700 }
2701 er = EMULATE_PRIV_FAIL;
2702 break;
2703 default:
2704 er = EMULATE_PRIV_FAIL;
2705 break;
2706 }
2707 }
2708
d116e812 2709 if (er == EMULATE_PRIV_FAIL)
e685c689 2710 kvm_mips_emulate_exc(cause, opc, run, vcpu);
d116e812 2711
e685c689
SL
2712 return er;
2713}
2714
d116e812
DCZ
2715/*
2716 * User Address (UA) fault; this could happen if:
e685c689
SL
2717 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
2718 * case we pass on the fault to the guest kernel and let it handle it.
2719 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
2720 * case we inject the TLB from the Guest TLB into the shadow host TLB
2721 */
31cf7498 2722enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
bdb7ed86 2723 u32 *opc,
d116e812
DCZ
2724 struct kvm_run *run,
2725 struct kvm_vcpu *vcpu)
e685c689
SL
2726{
2727 enum emulation_result er = EMULATE_DONE;
8cffd197 2728 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
e685c689
SL
2729 unsigned long va = vcpu->arch.host_cp0_badvaddr;
2730 int index;
2731
e4e94c0f
JH
2732 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
2733 vcpu->arch.host_cp0_badvaddr);
e685c689 2734
d116e812
DCZ
2735 /*
2736 * KVM would not have got the exception if this entry was valid in the
2737 * shadow host TLB. Check the Guest TLB; if the entry is not there, then
2738 * send the guest an exception. The guest exc handler should then inject
2739 * an entry into the guest TLB.
e685c689
SL
2740 */
2741 index = kvm_mips_guest_tlb_lookup(vcpu,
caa1faa7 2742 (va & VPN2_MASK) |
ca64c2be
PB
2743 (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
2744 KVM_ENTRYHI_ASID));
e685c689 2745 if (index < 0) {
16d100db 2746 if (exccode == EXCCODE_TLBL) {
e685c689 2747 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
16d100db 2748 } else if (exccode == EXCCODE_TLBS) {
e685c689
SL
2749 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2750 } else {
6ad78a5c
DCZ
2751 kvm_err("%s: invalid exc code: %d\n", __func__,
2752 exccode);
e685c689
SL
2753 er = EMULATE_FAIL;
2754 }
2755 } else {
2756 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2757
d116e812
DCZ
2758 /*
2759 * Check if the entry is valid, if not then setup a TLB invalid
2760 * exception to the guest
2761 */
e685c689 2762 if (!TLB_IS_VALID(*tlb, va)) {
16d100db 2763 if (exccode == EXCCODE_TLBL) {
e685c689
SL
2764 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2765 vcpu);
16d100db 2766 } else if (exccode == EXCCODE_TLBS) {
e685c689
SL
2767 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2768 vcpu);
2769 } else {
6ad78a5c
DCZ
2770 kvm_err("%s: invalid exc code: %d\n", __func__,
2771 exccode);
e685c689
SL
2772 er = EMULATE_FAIL;
2773 }
2774 } else {
d116e812 2775 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
9fbfb06a 2776 tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
d116e812
DCZ
2777 /*
2778 * OK, we have a Guest TLB entry; now inject it into the
2779 * shadow host TLB
2780 */
7e3d2a75
JH
2781 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
2782 va)) {
9b731bcf
JH
2783 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
2784 __func__, va, index, vcpu,
2785 read_c0_entryhi());
2786 er = EMULATE_FAIL;
2787 }
e685c689
SL
2788 }
2789 }
2790
2791 return er;
2792}
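
kvm_mips_handle_tlbmiss() boils down to a three-way decision: no guest TLB entry means delivering a TLB refill exception to the guest, an invalid entry means a TLB invalid exception, and a valid entry is injected into the shadow host TLB. A hypothetical outline of that decision:

/* Hypothetical outline of the decision taken by kvm_mips_handle_tlbmiss(). */
enum ua_fault_action {
	UA_DELIVER_REFILL,	/* entry not in guest TLB: send guest a TLB miss */
	UA_DELIVER_INVALID,	/* entry present but invalid: send TLB invalid */
	UA_INJECT_SHADOW,	/* valid guest mapping: inject into host TLB */
};

static enum ua_fault_action classify_ua_fault(int guest_index, int entry_valid)
{
	if (guest_index < 0)
		return UA_DELIVER_REFILL;
	if (!entry_valid)
		return UA_DELIVER_INVALID;
	return UA_INJECT_SHADOW;
}
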