arch/mips/kvm/emulate.c (mirror_ubuntu-artful-kernel.git)
Commit: KVM: MIPS/MMU: Invalidate GVA PTs on ASID changes
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: Instruction/Exception emulation
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/ktime.h>
15 #include <linux/kvm_host.h>
16 #include <linux/vmalloc.h>
17 #include <linux/fs.h>
18 #include <linux/bootmem.h>
19 #include <linux/random.h>
20 #include <asm/page.h>
21 #include <asm/cacheflush.h>
22 #include <asm/cacheops.h>
23 #include <asm/cpu-info.h>
24 #include <asm/mmu_context.h>
25 #include <asm/tlbflush.h>
26 #include <asm/inst.h>
27
28 #undef CONFIG_MIPS_MT
29 #include <asm/r4kcache.h>
30 #define CONFIG_MIPS_MT
31
32 #include "interrupt.h"
33 #include "commpage.h"
34
35 #include "trace.h"
36
37 /*
38 * Compute the return address, emulating the branch if required.
39 * This function should be called only when a branch delay slot is active.
40 */
41 unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
42 unsigned long instpc)
43 {
44 unsigned int dspcontrol;
45 union mips_instruction insn;
46 struct kvm_vcpu_arch *arch = &vcpu->arch;
47 long epc = instpc;
48 long nextpc = KVM_INVALID_INST;
49
50 if (epc & 3)
51 goto unaligned;
52
53 /* Read the instruction */
54 insn.word = kvm_get_inst((u32 *) epc, vcpu);
55
56 if (insn.word == KVM_INVALID_INST)
57 return KVM_INVALID_INST;
58
59 switch (insn.i_format.opcode) {
60 /* jr and jalr are in r_format format. */
61 case spec_op:
62 switch (insn.r_format.func) {
63 case jalr_op:
64 arch->gprs[insn.r_format.rd] = epc + 8;
65 /* Fall through */
66 case jr_op:
67 nextpc = arch->gprs[insn.r_format.rs];
68 break;
69 }
70 break;
71
72 /*
73 * This group contains:
74 * bltz_op, bgez_op, bltzl_op, bgezl_op,
75 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
76 */
77 case bcond_op:
78 switch (insn.i_format.rt) {
79 case bltz_op:
80 case bltzl_op:
81 if ((long)arch->gprs[insn.i_format.rs] < 0)
82 epc = epc + 4 + (insn.i_format.simmediate << 2);
83 else
84 epc += 8;
85 nextpc = epc;
86 break;
87
88 case bgez_op:
89 case bgezl_op:
90 if ((long)arch->gprs[insn.i_format.rs] >= 0)
91 epc = epc + 4 + (insn.i_format.simmediate << 2);
92 else
93 epc += 8;
94 nextpc = epc;
95 break;
96
97 case bltzal_op:
98 case bltzall_op:
99 arch->gprs[31] = epc + 8;
100 if ((long)arch->gprs[insn.i_format.rs] < 0)
101 epc = epc + 4 + (insn.i_format.simmediate << 2);
102 else
103 epc += 8;
104 nextpc = epc;
105 break;
106
107 case bgezal_op:
108 case bgezall_op:
109 arch->gprs[31] = epc + 8;
110 if ((long)arch->gprs[insn.i_format.rs] >= 0)
111 epc = epc + 4 + (insn.i_format.simmediate << 2);
112 else
113 epc += 8;
114 nextpc = epc;
115 break;
116 case bposge32_op:
117 if (!cpu_has_dsp)
118 goto sigill;
119
120 dspcontrol = rddsp(0x01);
121
122 if (dspcontrol >= 32)
123 epc = epc + 4 + (insn.i_format.simmediate << 2);
124 else
125 epc += 8;
126 nextpc = epc;
127 break;
128 }
129 break;
130
131 /* These are unconditional and in j_format. */
132 case jal_op:
133 arch->gprs[31] = instpc + 8;
134 case j_op:
135 epc += 4;
136 epc >>= 28;
137 epc <<= 28;
138 epc |= (insn.j_format.target << 2);
139 nextpc = epc;
140 break;
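/*
 * Worked example (illustrative annotation, not from the original source):
 * for a jal at 0x80001000 with a 26-bit target field of 0x0100400, the
 * delay slot sits at 0x80001004, the shift pair keeps its top nibble
 * (0x80000000), and the new PC becomes
 * 0x80000000 | (0x0100400 << 2) = 0x80401000, with $ra set to 0x80001008.
 */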
141
142 /* These are conditional and in i_format. */
143 case beq_op:
144 case beql_op:
145 if (arch->gprs[insn.i_format.rs] ==
146 arch->gprs[insn.i_format.rt])
147 epc = epc + 4 + (insn.i_format.simmediate << 2);
148 else
149 epc += 8;
150 nextpc = epc;
151 break;
152
153 case bne_op:
154 case bnel_op:
155 if (arch->gprs[insn.i_format.rs] !=
156 arch->gprs[insn.i_format.rt])
157 epc = epc + 4 + (insn.i_format.simmediate << 2);
158 else
159 epc += 8;
160 nextpc = epc;
161 break;
162
163 case blez_op: /* POP06 */
164 #ifndef CONFIG_CPU_MIPSR6
165 case blezl_op: /* removed in R6 */
166 #endif
167 if (insn.i_format.rt != 0)
168 goto compact_branch;
169 if ((long)arch->gprs[insn.i_format.rs] <= 0)
170 epc = epc + 4 + (insn.i_format.simmediate << 2);
171 else
172 epc += 8;
173 nextpc = epc;
174 break;
175
176 case bgtz_op: /* POP07 */
177 #ifndef CONFIG_CPU_MIPSR6
178 case bgtzl_op: /* removed in R6 */
179 #endif
180 if (insn.i_format.rt != 0)
181 goto compact_branch;
182 if ((long)arch->gprs[insn.i_format.rs] > 0)
183 epc = epc + 4 + (insn.i_format.simmediate << 2);
184 else
185 epc += 8;
186 nextpc = epc;
187 break;
188
189 /* And now the FPA/cp1 branch instructions. */
190 case cop1_op:
191 kvm_err("%s: unsupported cop1_op\n", __func__);
192 break;
193
194 #ifdef CONFIG_CPU_MIPSR6
195 /* R6 added the following compact branches with forbidden slots */
196 case blezl_op: /* POP26 */
197 case bgtzl_op: /* POP27 */
198 /* only rt == 0 isn't compact branch */
199 if (insn.i_format.rt != 0)
200 goto compact_branch;
201 break;
202 case pop10_op:
203 case pop30_op:
204 /* only rs == rt == 0 is reserved, rest are compact branches */
205 if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
206 goto compact_branch;
207 break;
208 case pop66_op:
209 case pop76_op:
210 /* only rs == 0 isn't compact branch */
211 if (insn.i_format.rs != 0)
212 goto compact_branch;
213 break;
214 compact_branch:
215 /*
216 * If we've hit an exception on the forbidden slot, then
217 * the branch must not have been taken.
218 */
219 epc += 8;
220 nextpc = epc;
221 break;
222 #else
223 compact_branch:
224 /* Compact branches not supported before R6 */
225 break;
226 #endif
227 }
228
229 return nextpc;
230
231 unaligned:
232 kvm_err("%s: unaligned epc\n", __func__);
233 return nextpc;
234
235 sigill:
236 kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
237 return nextpc;
238 }
239
240 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
241 {
242 unsigned long branch_pc;
243 enum emulation_result er = EMULATE_DONE;
244
245 if (cause & CAUSEF_BD) {
246 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
247 if (branch_pc == KVM_INVALID_INST) {
248 er = EMULATE_FAIL;
249 } else {
250 vcpu->arch.pc = branch_pc;
251 kvm_debug("BD update_pc(): New PC: %#lx\n",
252 vcpu->arch.pc);
253 }
254 } else
255 vcpu->arch.pc += 4;
256
257 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
258
259 return er;
260 }
261
262 /**
263 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
264 * @vcpu: Virtual CPU.
265 *
266 * Returns: 1 if the CP0_Count timer is disabled by either the guest
267 * CP0_Cause.DC bit or the count_ctl.DC bit.
268 * 0 otherwise (in which case CP0_Count timer is running).
269 */
270 static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
271 {
272 struct mips_coproc *cop0 = vcpu->arch.cop0;
273
274 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
275 (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
276 }
277
278 /**
279 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
280 *
281 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
282 *
283 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
284 */
285 static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
286 {
287 s64 now_ns, periods;
288 u64 delta;
289
290 now_ns = ktime_to_ns(now);
291 delta = now_ns + vcpu->arch.count_dyn_bias;
292
293 if (delta >= vcpu->arch.count_period) {
294 /* If delta is out of safe range the bias needs adjusting */
295 periods = div64_s64(now_ns, vcpu->arch.count_period);
296 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
297 /* Recalculate delta with new bias */
298 delta = now_ns + vcpu->arch.count_dyn_bias;
299 }
300
301 /*
302 * We've ensured that:
303 * delta < count_period
304 *
305 * Therefore the intermediate delta*count_hz will never overflow since
306 * at the boundary condition:
307 * delta = count_period
308 * delta = NSEC_PER_SEC * 2^32 / count_hz
309 * delta * count_hz = NSEC_PER_SEC * 2^32
310 */
311 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
312 }
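/*
 * Numeric sketch of the scaling above (illustrative, not part of the
 * original file): with the 100 MHz count_hz set by kvm_mips_init_count(),
 * count_period = NSEC_PER_SEC * 2^32 / count_hz = 42949672960 ns (~42.95 s)
 * and the conversion reduces to count = delta / 10 (one tick per 10 ns).
 * Because the dynamic bias keeps delta below count_period, the 64-bit
 * product delta * count_hz stays below NSEC_PER_SEC * 2^32 and cannot
 * overflow.
 */
#if 0	/* example only; mirrors the arithmetic in kvm_mips_ktime_to_count() */
static u32 example_ns_to_count(u64 delta_ns)
{
	const u32 example_count_hz = 100 * 1000 * 1000;	/* 100 MHz */

	/* assumes delta_ns < count_period, as guaranteed by the bias */
	return div_u64(delta_ns * example_count_hz, NSEC_PER_SEC);
}
#endif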
313
314 /**
315 * kvm_mips_count_time() - Get effective current time.
316 * @vcpu: Virtual CPU.
317 *
318 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
319 * except when the master disable bit is set in count_ctl, in which case it is
320 * count_resume, i.e. the time that the count was disabled.
321 *
322 * Returns: Effective monotonic ktime for CP0_Count.
323 */
324 static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
325 {
326 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
327 return vcpu->arch.count_resume;
328
329 return ktime_get();
330 }
331
332 /**
333 * kvm_mips_read_count_running() - Read the current count value as if running.
334 * @vcpu: Virtual CPU.
335 * @now: Kernel time to read CP0_Count at.
336 *
337 * Returns the current guest CP0_Count register at time @now, and handles the
338 * case where the timer interrupt is pending and hasn't been handled yet.
339 *
340 * Returns: The current value of the guest CP0_Count register.
341 */
342 static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
343 {
344 struct mips_coproc *cop0 = vcpu->arch.cop0;
345 ktime_t expires, threshold;
346 u32 count, compare;
347 int running;
348
349 /* Calculate the biased and scaled guest CP0_Count */
350 count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
351 compare = kvm_read_c0_guest_compare(cop0);
352
353 /*
354 * Find whether CP0_Count has reached the closest timer interrupt. If
355 * not, we shouldn't inject it.
356 */
357 if ((s32)(count - compare) < 0)
358 return count;
359
360 /*
361 * The CP0_Count we're going to return has already reached the closest
362 * timer interrupt. Quickly check if it really is a new interrupt by
363 * looking at whether the interval until the hrtimer expiry time is
364 * less than 1/4 of the timer period.
365 */
366 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
367 threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
368 if (ktime_before(expires, threshold)) {
369 /*
370 * Cancel it while we handle it so there's no chance of
371 * interference with the timeout handler.
372 */
373 running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
374
375 /* Nothing should be waiting on the timeout */
376 kvm_mips_callbacks->queue_timer_int(vcpu);
377
378 /*
379 * Restart the timer if it was running based on the expiry time
380 * we read, so that we don't push it back 2 periods.
381 */
382 if (running) {
383 expires = ktime_add_ns(expires,
384 vcpu->arch.count_period);
385 hrtimer_start(&vcpu->arch.comparecount_timer, expires,
386 HRTIMER_MODE_ABS);
387 }
388 }
389
390 return count;
391 }
392
393 /**
394 * kvm_mips_read_count() - Read the current count value.
395 * @vcpu: Virtual CPU.
396 *
397 * Read the current guest CP0_Count value, taking into account whether the timer
398 * is stopped.
399 *
400 * Returns: The current guest CP0_Count value.
401 */
402 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
403 {
404 struct mips_coproc *cop0 = vcpu->arch.cop0;
405
406 /* If count disabled just read static copy of count */
407 if (kvm_mips_count_disabled(vcpu))
408 return kvm_read_c0_guest_count(cop0);
409
410 return kvm_mips_read_count_running(vcpu, ktime_get());
411 }
412
413 /**
414 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
415 * @vcpu: Virtual CPU.
416 * @count: Output pointer for CP0_Count value at point of freeze.
417 *
418 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
419 * at the point it was frozen. It is guaranteed that any pending interrupts at
420 * the point it was frozen are handled, and none after that point.
421 *
422 * This is useful where the time/CP0_Count is needed in the calculation of the
423 * new parameters.
424 *
425 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
426 *
427 * Returns: The ktime at the point of freeze.
428 */
429 static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
430 {
431 ktime_t now;
432
433 /* stop hrtimer before finding time */
434 hrtimer_cancel(&vcpu->arch.comparecount_timer);
435 now = ktime_get();
436
437 /* find count at this point and handle pending hrtimer */
438 *count = kvm_mips_read_count_running(vcpu, now);
439
440 return now;
441 }
442
443 /**
444 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
445 * @vcpu: Virtual CPU.
446 * @now: ktime at point of resume.
447 * @count: CP0_Count at point of resume.
448 *
449 * Resumes the timer and updates the timer expiry based on @now and @count.
450 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
451 * parameters need to be changed.
452 *
453 * It is guaranteed that a timer interrupt immediately after resume will be
454 * handled, but not if CP0_Compare is exactly at @count. That case is already
455 * handled by kvm_mips_freeze_hrtimer().
456 *
457 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
458 */
459 static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
460 ktime_t now, u32 count)
461 {
462 struct mips_coproc *cop0 = vcpu->arch.cop0;
463 u32 compare;
464 u64 delta;
465 ktime_t expire;
466
467 /* Calculate timeout (wrap 0 to 2^32) */
468 compare = kvm_read_c0_guest_compare(cop0);
469 delta = (u64)(u32)(compare - count - 1) + 1;
470 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
471 expire = ktime_add_ns(now, delta);
472
473 /* Update hrtimer to use new timeout */
474 hrtimer_cancel(&vcpu->arch.comparecount_timer);
475 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
476 }
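/*
 * Worked example of the wrap handling above (illustrative, not from the
 * original source): (u64)(u32)(compare - count - 1) + 1 maps a zero
 * distance to a full period instead of zero. With count = 0xfffffff0 and
 * compare = 0x00000010 the 32-bit difference is 0x20, so the hrtimer is
 * set 0x20 ticks ahead; with compare == count it is set 2^32 ticks ahead,
 * i.e. one full CP0_Count wrap (~42.95 s at 100 MHz).
 */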
477
478 /**
479 * kvm_mips_write_count() - Modify the count and update timer.
480 * @vcpu: Virtual CPU.
481 * @count: Guest CP0_Count value to set.
482 *
483 * Sets the CP0_Count value and updates the timer accordingly.
484 */
485 void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
486 {
487 struct mips_coproc *cop0 = vcpu->arch.cop0;
488 ktime_t now;
489
490 /* Calculate bias */
491 now = kvm_mips_count_time(vcpu);
492 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
493
494 if (kvm_mips_count_disabled(vcpu))
495 /* The timer's disabled, adjust the static count */
496 kvm_write_c0_guest_count(cop0, count);
497 else
498 /* Update timeout */
499 kvm_mips_resume_hrtimer(vcpu, now, count);
500 }
501
502 /**
503 * kvm_mips_init_count() - Initialise timer.
504 * @vcpu: Virtual CPU.
505 *
506 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
507 * it going if it's enabled.
508 */
509 void kvm_mips_init_count(struct kvm_vcpu *vcpu)
510 {
511 /* 100 MHz */
512 vcpu->arch.count_hz = 100*1000*1000;
513 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
514 vcpu->arch.count_hz);
515 vcpu->arch.count_dyn_bias = 0;
516
517 /* Starting at 0 */
518 kvm_mips_write_count(vcpu, 0);
519 }
520
521 /**
522 * kvm_mips_set_count_hz() - Update the frequency of the timer.
523 * @vcpu: Virtual CPU.
524 * @count_hz: Frequency of CP0_Count timer in Hz.
525 *
526 * Change the frequency of the CP0_Count timer. This is done atomically so that
527 * CP0_Count is continuous and no timer interrupt is lost.
528 *
529 * Returns: -EINVAL if @count_hz is out of range.
530 * 0 on success.
531 */
532 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
533 {
534 struct mips_coproc *cop0 = vcpu->arch.cop0;
535 int dc;
536 ktime_t now;
537 u32 count;
538
539 /* ensure the frequency is in a sensible range... */
540 if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
541 return -EINVAL;
542 /* ... and has actually changed */
543 if (vcpu->arch.count_hz == count_hz)
544 return 0;
545
546 /* Safely freeze timer so we can keep it continuous */
547 dc = kvm_mips_count_disabled(vcpu);
548 if (dc) {
549 now = kvm_mips_count_time(vcpu);
550 count = kvm_read_c0_guest_count(cop0);
551 } else {
552 now = kvm_mips_freeze_hrtimer(vcpu, &count);
553 }
554
555 /* Update the frequency */
556 vcpu->arch.count_hz = count_hz;
557 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
558 vcpu->arch.count_dyn_bias = 0;
559
560 /* Calculate adjusted bias so dynamic count is unchanged */
561 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
562
563 /* Update and resume hrtimer */
564 if (!dc)
565 kvm_mips_resume_hrtimer(vcpu, now, count);
566 return 0;
567 }
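/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * kvm_mips_set_count_hz() is typically reached from the KVM one-reg
 * interface. The register ID name below is taken from the MIPS KVM UAPI
 * headers and is shown only as an example.
 */
#if 0	/* example only */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_set_count_hz(int vcpu_fd, __u64 hz)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_MIPS_COUNT_HZ,
		.addr = (__u64)(unsigned long)&hz,
	};

	/* returns 0 on success, -1 with errno set (EINVAL if hz out of range) */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
#endif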
568
569 /**
570 * kvm_mips_write_compare() - Modify compare and update timer.
571 * @vcpu: Virtual CPU.
572 * @compare: New CP0_Compare value.
573 * @ack: Whether to acknowledge timer interrupt.
574 *
575 * Update CP0_Compare to a new value and update the timeout.
576 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
577 * any pending timer interrupt is preserved.
578 */
579 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
580 {
581 struct mips_coproc *cop0 = vcpu->arch.cop0;
582 int dc;
583 u32 old_compare = kvm_read_c0_guest_compare(cop0);
584 ktime_t now;
585 u32 count;
586
587 /* if unchanged, must just be an ack */
588 if (old_compare == compare) {
589 if (!ack)
590 return;
591 kvm_mips_callbacks->dequeue_timer_int(vcpu);
592 kvm_write_c0_guest_compare(cop0, compare);
593 return;
594 }
595
596 /* freeze_hrtimer() takes care of timer interrupts <= count */
597 dc = kvm_mips_count_disabled(vcpu);
598 if (!dc)
599 now = kvm_mips_freeze_hrtimer(vcpu, &count);
600
601 if (ack)
602 kvm_mips_callbacks->dequeue_timer_int(vcpu);
603
604 kvm_write_c0_guest_compare(cop0, compare);
605
606 /* resume_hrtimer() takes care of timer interrupts > count */
607 if (!dc)
608 kvm_mips_resume_hrtimer(vcpu, now, count);
609 }
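/*
 * Usage note (illustrative annotation, not from the original source): the
 * guest MTC0-to-Compare path later in this file passes ack=true, matching
 * the architectural behaviour that a CP0_Compare write clears a pending
 * timer interrupt. An ack=false caller (e.g. userspace restoring register
 * state) is expected to preserve any pending interrupt instead.
 */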
610
611 /**
612 * kvm_mips_count_disable() - Disable count.
613 * @vcpu: Virtual CPU.
614 *
615 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
616 * time will be handled but not after.
617 *
618 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
619 * count_ctl.DC has been set (count disabled).
620 *
621 * Returns: The time that the timer was stopped.
622 */
623 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
624 {
625 struct mips_coproc *cop0 = vcpu->arch.cop0;
626 u32 count;
627 ktime_t now;
628
629 /* Stop hrtimer */
630 hrtimer_cancel(&vcpu->arch.comparecount_timer);
631
632 /* Set the static count from the dynamic count, handling pending TI */
633 now = ktime_get();
634 count = kvm_mips_read_count_running(vcpu, now);
635 kvm_write_c0_guest_count(cop0, count);
636
637 return now;
638 }
639
640 /**
641 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
642 * @vcpu: Virtual CPU.
643 *
644 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
645 * before the final stop time will be handled if the timer isn't disabled by
646 * count_ctl.DC, but not after.
647 *
648 * Assumes CP0_Cause.DC is clear (count enabled).
649 */
650 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
651 {
652 struct mips_coproc *cop0 = vcpu->arch.cop0;
653
654 kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
655 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
656 kvm_mips_count_disable(vcpu);
657 }
658
659 /**
660 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
661 * @vcpu: Virtual CPU.
662 *
663 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
664 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
665 * potentially before even returning, so the caller should be careful with
666 * ordering of CP0_Cause modifications so as not to lose it.
667 *
668 * Assumes CP0_Cause.DC is set (count disabled).
669 */
670 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
671 {
672 struct mips_coproc *cop0 = vcpu->arch.cop0;
673 u32 count;
674
675 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
676
677 /*
678 * Set the dynamic count to match the static count.
679 * This starts the hrtimer if count_ctl.DC allows it.
680 * Otherwise it conveniently updates the biases.
681 */
682 count = kvm_read_c0_guest_count(cop0);
683 kvm_mips_write_count(vcpu, count);
684 }
685
686 /**
687 * kvm_mips_set_count_ctl() - Update the count control KVM register.
688 * @vcpu: Virtual CPU.
689 * @count_ctl: Count control register new value.
690 *
691 * Set the count control KVM register. The timer is updated accordingly.
692 *
693 * Returns: -EINVAL if reserved bits are set.
694 * 0 on success.
695 */
696 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
697 {
698 struct mips_coproc *cop0 = vcpu->arch.cop0;
699 s64 changed = count_ctl ^ vcpu->arch.count_ctl;
700 s64 delta;
701 ktime_t expire, now;
702 u32 count, compare;
703
704 /* Only allow defined bits to be changed */
705 if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
706 return -EINVAL;
707
708 /* Apply new value */
709 vcpu->arch.count_ctl = count_ctl;
710
711 /* Master CP0_Count disable */
712 if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
713 /* Is CP0_Cause.DC already disabling CP0_Count? */
714 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
715 if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
716 /* Just record the current time */
717 vcpu->arch.count_resume = ktime_get();
718 } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
719 /* disable timer and record current time */
720 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
721 } else {
722 /*
723 * Calculate timeout relative to static count at resume
724 * time (wrap 0 to 2^32).
725 */
726 count = kvm_read_c0_guest_count(cop0);
727 compare = kvm_read_c0_guest_compare(cop0);
728 delta = (u64)(u32)(compare - count - 1) + 1;
729 delta = div_u64(delta * NSEC_PER_SEC,
730 vcpu->arch.count_hz);
731 expire = ktime_add_ns(vcpu->arch.count_resume, delta);
732
733 /* Handle pending interrupt */
734 now = ktime_get();
735 if (ktime_compare(now, expire) >= 0)
736 /* Nothing should be waiting on the timeout */
737 kvm_mips_callbacks->queue_timer_int(vcpu);
738
739 /* Resume hrtimer without changing bias */
740 count = kvm_mips_read_count_running(vcpu, now);
741 kvm_mips_resume_hrtimer(vcpu, now, count);
742 }
743 }
744
745 return 0;
746 }
747
748 /**
749 * kvm_mips_set_count_resume() - Update the count resume KVM register.
750 * @vcpu: Virtual CPU.
751 * @count_resume: Count resume register new value.
752 *
753 * Set the count resume KVM register.
754 *
755 * Returns: -EINVAL if out of valid range (0..now).
756 * 0 on success.
757 */
758 int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
759 {
760 /*
761 * It doesn't make sense for the resume time to be in the future, as it
762 * would be possible for the next interrupt to be more than a full
763 * period in the future.
764 */
765 if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
766 return -EINVAL;
767
768 vcpu->arch.count_resume = ns_to_ktime(count_resume);
769 return 0;
770 }
771
772 /**
773 * kvm_mips_count_timeout() - Push timer forward on timeout.
774 * @vcpu: Virtual CPU.
775 *
776 * Handle an hrtimer event by pushing the hrtimer forward a period.
777 *
778 * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
779 */
780 enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
781 {
782 /* Add the Count period to the current expiry time */
783 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
784 vcpu->arch.count_period);
785 return HRTIMER_RESTART;
786 }
787
788 enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
789 {
790 struct mips_coproc *cop0 = vcpu->arch.cop0;
791 enum emulation_result er = EMULATE_DONE;
792
793 if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
794 kvm_clear_c0_guest_status(cop0, ST0_ERL);
795 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
796 } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
797 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
798 kvm_read_c0_guest_epc(cop0));
799 kvm_clear_c0_guest_status(cop0, ST0_EXL);
800 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
801
802 } else {
803 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
804 vcpu->arch.pc);
805 er = EMULATE_FAIL;
806 }
807
808 return er;
809 }
810
811 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
812 {
813 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
814 vcpu->arch.pending_exceptions);
815
816 ++vcpu->stat.wait_exits;
817 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
818 if (!vcpu->arch.pending_exceptions) {
819 vcpu->arch.wait = 1;
820 kvm_vcpu_block(vcpu);
821
822 /*
823 * If we are runnable, then definitely go off to user space to
824 * check if any I/O interrupts are pending.
825 */
826 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
827 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
828 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
829 }
830 }
831
832 return EMULATE_DONE;
833 }
834
835 /*
836 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
837 * we can catch this, if things ever change
838 */
839 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
840 {
841 struct mips_coproc *cop0 = vcpu->arch.cop0;
842 unsigned long pc = vcpu->arch.pc;
843
844 kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
845 return EMULATE_FAIL;
846 }
847
848 /**
849 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
850 * @vcpu: VCPU with changed mappings.
851 * @tlb: TLB entry being removed.
852 *
853 * This is called to indicate a single change in guest MMU mappings, so that we
854 * can arrange TLB flushes on this and other CPUs.
855 */
856 static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
857 struct kvm_mips_tlb *tlb)
858 {
859 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
860 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
861 int cpu, i;
862 bool user;
863
864 /* No need to flush for entries which are already invalid */
865 if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
866 return;
867 /* User address space doesn't need flushing for KSeg2/3 changes */
868 user = tlb->tlb_hi < KVM_GUEST_KSEG0;
869
870 preempt_disable();
871
872 /*
873 * Probe the shadow host TLB for the entry being overwritten, if one
874 * matches, invalidate it
875 */
876 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);
877
878 /* Invalidate the whole ASID on other CPUs */
879 cpu = smp_processor_id();
880 for_each_possible_cpu(i) {
881 if (i == cpu)
882 continue;
883 if (user)
884 cpu_context(i, user_mm) = 0;
885 cpu_context(i, kern_mm) = 0;
886 }
887
888 preempt_enable();
889 }
890
891 /* Write Guest TLB Entry @ Index */
892 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
893 {
894 struct mips_coproc *cop0 = vcpu->arch.cop0;
895 int index = kvm_read_c0_guest_index(cop0);
896 struct kvm_mips_tlb *tlb = NULL;
897 unsigned long pc = vcpu->arch.pc;
898
899 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
900 kvm_debug("%s: illegal index: %d\n", __func__, index);
901 kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
902 pc, index, kvm_read_c0_guest_entryhi(cop0),
903 kvm_read_c0_guest_entrylo0(cop0),
904 kvm_read_c0_guest_entrylo1(cop0),
905 kvm_read_c0_guest_pagemask(cop0));
906 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
907 }
908
909 tlb = &vcpu->arch.guest_tlb[index];
910
911 kvm_mips_invalidate_guest_tlb(vcpu, tlb);
912
913 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
914 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
915 tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
916 tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
917
918 kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
919 pc, index, kvm_read_c0_guest_entryhi(cop0),
920 kvm_read_c0_guest_entrylo0(cop0),
921 kvm_read_c0_guest_entrylo1(cop0),
922 kvm_read_c0_guest_pagemask(cop0));
923
924 return EMULATE_DONE;
925 }
926
927 /* Write Guest TLB Entry @ Random Index */
928 enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
929 {
930 struct mips_coproc *cop0 = vcpu->arch.cop0;
931 struct kvm_mips_tlb *tlb = NULL;
932 unsigned long pc = vcpu->arch.pc;
933 int index;
934
935 get_random_bytes(&index, sizeof(index));
936 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
937
938 tlb = &vcpu->arch.guest_tlb[index];
939
940 kvm_mips_invalidate_guest_tlb(vcpu, tlb);
941
942 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
943 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
944 tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
945 tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
946
947 kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
948 pc, index, kvm_read_c0_guest_entryhi(cop0),
949 kvm_read_c0_guest_entrylo0(cop0),
950 kvm_read_c0_guest_entrylo1(cop0));
951
952 return EMULATE_DONE;
953 }
954
955 enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
956 {
957 struct mips_coproc *cop0 = vcpu->arch.cop0;
958 long entryhi = kvm_read_c0_guest_entryhi(cop0);
959 unsigned long pc = vcpu->arch.pc;
960 int index = -1;
961
962 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
963
964 kvm_write_c0_guest_index(cop0, index);
965
966 kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
967 index);
968
969 return EMULATE_DONE;
970 }
971
972 /**
973 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
974 * @vcpu: Virtual CPU.
975 *
976 * Finds the mask of bits which are writable in the guest's Config1 CP0
977 * register, by userland (currently read-only to the guest).
978 */
979 unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
980 {
981 unsigned int mask = 0;
982
983 /* Permit FPU to be present if FPU is supported */
984 if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
985 mask |= MIPS_CONF1_FP;
986
987 return mask;
988 }
989
990 /**
991 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
992 * @vcpu: Virtual CPU.
993 *
994 * Finds the mask of bits which are writable in the guest's Config3 CP0
995 * register, by userland (currently read-only to the guest).
996 */
997 unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
998 {
999 /* Config4 and ULRI are optional */
1000 unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
1001
1002 /* Permit MSA to be present if MSA is supported */
1003 if (kvm_mips_guest_can_have_msa(&vcpu->arch))
1004 mask |= MIPS_CONF3_MSA;
1005
1006 return mask;
1007 }
1008
1009 /**
1010 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
1011 * @vcpu: Virtual CPU.
1012 *
1013 * Finds the mask of bits which are writable in the guest's Config4 CP0
1014 * register, by userland (currently read-only to the guest).
1015 */
1016 unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
1017 {
1018 /* Config5 is optional */
1019 unsigned int mask = MIPS_CONF_M;
1020
1021 /* KScrExist */
1022 mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;
1023
1024 return mask;
1025 }
1026
1027 /**
1028 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
1029 * @vcpu: Virtual CPU.
1030 *
1031 * Finds the mask of bits which are writable in the guest's Config5 CP0
1032 * register, by the guest itself.
1033 */
1034 unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
1035 {
1036 unsigned int mask = 0;
1037
1038 /* Permit MSAEn changes if MSA supported and enabled */
1039 if (kvm_mips_guest_has_msa(&vcpu->arch))
1040 mask |= MIPS_CONF5_MSAEN;
1041
1042 /*
1043 * Permit guest FPU mode changes if FPU is enabled and the relevant
1044 * feature exists according to FIR register.
1045 */
1046 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
1047 if (cpu_has_fre)
1048 mask |= MIPS_CONF5_FRE;
1049 /* We don't support UFR or UFE */
1050 }
1051
1052 return mask;
1053 }
1054
1055 enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
1056 u32 *opc, u32 cause,
1057 struct kvm_run *run,
1058 struct kvm_vcpu *vcpu)
1059 {
1060 struct mips_coproc *cop0 = vcpu->arch.cop0;
1061 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
1062 enum emulation_result er = EMULATE_DONE;
1063 u32 rt, rd, sel;
1064 unsigned long curr_pc;
1065 int cpu, i;
1066
1067 /*
1068 * Update PC and hold onto current PC in case there is
1069 * an error and we want to rollback the PC
1070 */
1071 curr_pc = vcpu->arch.pc;
1072 er = update_pc(vcpu, cause);
1073 if (er == EMULATE_FAIL)
1074 return er;
1075
1076 if (inst.co_format.co) {
1077 switch (inst.co_format.func) {
1078 case tlbr_op: /* Read indexed TLB entry */
1079 er = kvm_mips_emul_tlbr(vcpu);
1080 break;
1081 case tlbwi_op: /* Write indexed */
1082 er = kvm_mips_emul_tlbwi(vcpu);
1083 break;
1084 case tlbwr_op: /* Write random */
1085 er = kvm_mips_emul_tlbwr(vcpu);
1086 break;
1087 case tlbp_op: /* TLB Probe */
1088 er = kvm_mips_emul_tlbp(vcpu);
1089 break;
1090 case rfe_op:
1091 kvm_err("!!!COP0_RFE!!!\n");
1092 break;
1093 case eret_op:
1094 er = kvm_mips_emul_eret(vcpu);
1095 goto dont_update_pc;
1096 case wait_op:
1097 er = kvm_mips_emul_wait(vcpu);
1098 break;
1099 }
1100 } else {
1101 rt = inst.c0r_format.rt;
1102 rd = inst.c0r_format.rd;
1103 sel = inst.c0r_format.sel;
1104
1105 switch (inst.c0r_format.rs) {
1106 case mfc_op:
1107 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1108 cop0->stat[rd][sel]++;
1109 #endif
1110 /* Get reg */
1111 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1112 vcpu->arch.gprs[rt] =
1113 (s32)kvm_mips_read_count(vcpu);
1114 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
1115 vcpu->arch.gprs[rt] = 0x0;
1116 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1117 kvm_mips_trans_mfc0(inst, opc, vcpu);
1118 #endif
1119 } else {
1120 vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
1121
1122 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1123 kvm_mips_trans_mfc0(inst, opc, vcpu);
1124 #endif
1125 }
1126
1127 trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
1128 KVM_TRACE_COP0(rd, sel),
1129 vcpu->arch.gprs[rt]);
1130 break;
1131
1132 case dmfc_op:
1133 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
1134
1135 trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
1136 KVM_TRACE_COP0(rd, sel),
1137 vcpu->arch.gprs[rt]);
1138 break;
1139
1140 case mtc_op:
1141 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1142 cop0->stat[rd][sel]++;
1143 #endif
1144 trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
1145 KVM_TRACE_COP0(rd, sel),
1146 vcpu->arch.gprs[rt]);
1147
1148 if ((rd == MIPS_CP0_TLB_INDEX)
1149 && (vcpu->arch.gprs[rt] >=
1150 KVM_MIPS_GUEST_TLB_SIZE)) {
1151 kvm_err("Invalid TLB Index: %ld",
1152 vcpu->arch.gprs[rt]);
1153 er = EMULATE_FAIL;
1154 break;
1155 }
1156 #define C0_EBASE_CORE_MASK 0xff
1157 if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
1158 /* Preserve CORE number */
1159 kvm_change_c0_guest_ebase(cop0,
1160 ~(C0_EBASE_CORE_MASK),
1161 vcpu->arch.gprs[rt]);
1162 kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
1163 kvm_read_c0_guest_ebase(cop0));
1164 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
1165 u32 nasid =
1166 vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
1167 if (((kvm_read_c0_guest_entryhi(cop0) &
1168 KVM_ENTRYHI_ASID) != nasid)) {
1169 trace_kvm_asid_change(vcpu,
1170 kvm_read_c0_guest_entryhi(cop0)
1171 & KVM_ENTRYHI_ASID,
1172 nasid);
1173
1174 /*
1175 * Flush entries from the GVA page
1176 * tables.
1177 * Guest user page table will get
1178 * flushed lazily on re-entry to guest
1179 * user if the guest ASID actually
1180 * changes.
1181 */
1182 kvm_mips_flush_gva_pt(kern_mm->pgd,
1183 KMF_KERN);
1184
1185 /*
1186 * Regenerate/invalidate kernel MMU
1187 * context.
1188 * The user MMU context will be
1189 * regenerated lazily on re-entry to
1190 * guest user if the guest ASID actually
1191 * changes.
1192 */
1193 preempt_disable();
1194 cpu = smp_processor_id();
1195 kvm_get_new_mmu_context(kern_mm,
1196 cpu, vcpu);
1197 for_each_possible_cpu(i)
1198 if (i != cpu)
1199 cpu_context(i, kern_mm) = 0;
1200 preempt_enable();
1201 }
1202 kvm_write_c0_guest_entryhi(cop0,
1203 vcpu->arch.gprs[rt]);
1204 }
1205 /* Are we writing to COUNT */
1206 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1207 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
1208 goto done;
1209 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
1210 /* If we are writing to COMPARE */
1211 /* Clear pending timer interrupt, if any */
1212 kvm_mips_write_compare(vcpu,
1213 vcpu->arch.gprs[rt],
1214 true);
1215 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1216 unsigned int old_val, val, change;
1217
1218 old_val = kvm_read_c0_guest_status(cop0);
1219 val = vcpu->arch.gprs[rt];
1220 change = val ^ old_val;
1221
1222 /* Make sure that the NMI bit is never set */
1223 val &= ~ST0_NMI;
1224
1225 /*
1226 * Don't allow CU1 or FR to be set unless FPU
1227 * capability enabled and exists in guest
1228 * configuration.
1229 */
1230 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1231 val &= ~(ST0_CU1 | ST0_FR);
1232
1233 /*
1234 * Also don't allow FR to be set if host doesn't
1235 * support it.
1236 */
1237 if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
1238 val &= ~ST0_FR;
1239
1240
1241 /* Handle changes in FPU mode */
1242 preempt_disable();
1243
1244 /*
1245 * FPU and Vector register state is made
1246 * UNPREDICTABLE by a change of FR, so don't
1247 * even bother saving it.
1248 */
1249 if (change & ST0_FR)
1250 kvm_drop_fpu(vcpu);
1251
1252 /*
1253 * If MSA state is already live, it is undefined
1254 * how it interacts with FR=0 FPU state, and we
1255 * don't want to hit reserved instruction
1256 * exceptions trying to save the MSA state later
1257 * when CU=1 && FR=1, so play it safe and save
1258 * it first.
1259 */
1260 if (change & ST0_CU1 && !(val & ST0_FR) &&
1261 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1262 kvm_lose_fpu(vcpu);
1263
1264 /*
1265 * Propagate CU1 (FPU enable) changes
1266 * immediately if the FPU context is already
1267 * loaded. When disabling we leave the context
1268 * loaded so it can be quickly enabled again in
1269 * the near future.
1270 */
1271 if (change & ST0_CU1 &&
1272 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1273 change_c0_status(ST0_CU1, val);
1274
1275 preempt_enable();
1276
1277 kvm_write_c0_guest_status(cop0, val);
1278
1279 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1280 /*
1281 * If FPU present, we need CU1/FR bits to take
1282 * effect fairly soon.
1283 */
1284 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1285 kvm_mips_trans_mtc0(inst, opc, vcpu);
1286 #endif
1287 } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1288 unsigned int old_val, val, change, wrmask;
1289
1290 old_val = kvm_read_c0_guest_config5(cop0);
1291 val = vcpu->arch.gprs[rt];
1292
1293 /* Only a few bits are writable in Config5 */
1294 wrmask = kvm_mips_config5_wrmask(vcpu);
1295 change = (val ^ old_val) & wrmask;
1296 val = old_val ^ change;
1297
1298
1299 /* Handle changes in FPU/MSA modes */
1300 preempt_disable();
1301
1302 /*
1303 * Propagate FRE changes immediately if the FPU
1304 * context is already loaded.
1305 */
1306 if (change & MIPS_CONF5_FRE &&
1307 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1308 change_c0_config5(MIPS_CONF5_FRE, val);
1309
1310 /*
1311 * Propagate MSAEn changes immediately if the
1312 * MSA context is already loaded. When disabling
1313 * we leave the context loaded so it can be
1314 * quickly enabled again in the near future.
1315 */
1316 if (change & MIPS_CONF5_MSAEN &&
1317 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1318 change_c0_config5(MIPS_CONF5_MSAEN,
1319 val);
1320
1321 preempt_enable();
1322
1323 kvm_write_c0_guest_config5(cop0, val);
1324 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1325 u32 old_cause, new_cause;
1326
1327 old_cause = kvm_read_c0_guest_cause(cop0);
1328 new_cause = vcpu->arch.gprs[rt];
1329 /* Update R/W bits */
1330 kvm_change_c0_guest_cause(cop0, 0x08800300,
1331 new_cause);
1332 /* DC bit enabling/disabling timer? */
1333 if ((old_cause ^ new_cause) & CAUSEF_DC) {
1334 if (new_cause & CAUSEF_DC)
1335 kvm_mips_count_disable_cause(vcpu);
1336 else
1337 kvm_mips_count_enable_cause(vcpu);
1338 }
1339 } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
1340 u32 mask = MIPS_HWRENA_CPUNUM |
1341 MIPS_HWRENA_SYNCISTEP |
1342 MIPS_HWRENA_CC |
1343 MIPS_HWRENA_CCRES;
1344
1345 if (kvm_read_c0_guest_config3(cop0) &
1346 MIPS_CONF3_ULRI)
1347 mask |= MIPS_HWRENA_ULR;
1348 cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
1349 } else {
1350 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
1351 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1352 kvm_mips_trans_mtc0(inst, opc, vcpu);
1353 #endif
1354 }
1355 break;
1356
1357 case dmtc_op:
1358 kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
1359 vcpu->arch.pc, rt, rd, sel);
1360 trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
1361 KVM_TRACE_COP0(rd, sel),
1362 vcpu->arch.gprs[rt]);
1363 er = EMULATE_FAIL;
1364 break;
1365
1366 case mfmc0_op:
1367 #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
1368 cop0->stat[MIPS_CP0_STATUS][0]++;
1369 #endif
1370 if (rt != 0)
1371 vcpu->arch.gprs[rt] =
1372 kvm_read_c0_guest_status(cop0);
1373 /* EI */
1374 if (inst.mfmc0_format.sc) {
1375 kvm_debug("[%#lx] mfmc0_op: EI\n",
1376 vcpu->arch.pc);
1377 kvm_set_c0_guest_status(cop0, ST0_IE);
1378 } else {
1379 kvm_debug("[%#lx] mfmc0_op: DI\n",
1380 vcpu->arch.pc);
1381 kvm_clear_c0_guest_status(cop0, ST0_IE);
1382 }
1383
1384 break;
1385
1386 case wrpgpr_op:
1387 {
1388 u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
1389 u32 pss =
1390 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
1391 /*
1392 * We don't support any shadow register sets, so
1393 * SRSCtl[PSS] == SRSCtl[CSS] = 0
1394 */
1395 if (css || pss) {
1396 er = EMULATE_FAIL;
1397 break;
1398 }
1399 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
1400 vcpu->arch.gprs[rt]);
1401 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1402 }
1403 break;
1404 default:
1405 kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1406 vcpu->arch.pc, inst.c0r_format.rs);
1407 er = EMULATE_FAIL;
1408 break;
1409 }
1410 }
1411
1412 done:
1413 /* Rollback PC only if emulation was unsuccessful */
1414 if (er == EMULATE_FAIL)
1415 vcpu->arch.pc = curr_pc;
1416
1417 dont_update_pc:
1418 /*
1419 * This is for special instructions whose emulation
1420 * updates the PC, so do not overwrite the PC under
1421 * any circumstances
1422 */
1423
1424 return er;
1425 }
1426
1427 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
1428 u32 cause,
1429 struct kvm_run *run,
1430 struct kvm_vcpu *vcpu)
1431 {
1432 enum emulation_result er = EMULATE_DO_MMIO;
1433 u32 rt;
1434 u32 bytes;
1435 void *data = run->mmio.data;
1436 unsigned long curr_pc;
1437
1438 /*
1439 * Update PC and hold onto current PC in case there is
1440 * an error and we want to rollback the PC
1441 */
1442 curr_pc = vcpu->arch.pc;
1443 er = update_pc(vcpu, cause);
1444 if (er == EMULATE_FAIL)
1445 return er;
1446
1447 rt = inst.i_format.rt;
1448
1449 switch (inst.i_format.opcode) {
1450 case sb_op:
1451 bytes = 1;
1452 if (bytes > sizeof(run->mmio.data)) {
1453 kvm_err("%s: bad MMIO length: %d\n", __func__,
1454 run->mmio.len);
1455 }
1456 run->mmio.phys_addr =
1457 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1458 host_cp0_badvaddr);
1459 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1460 er = EMULATE_FAIL;
1461 break;
1462 }
1463 run->mmio.len = bytes;
1464 run->mmio.is_write = 1;
1465 vcpu->mmio_needed = 1;
1466 vcpu->mmio_is_write = 1;
1467 *(u8 *) data = vcpu->arch.gprs[rt];
1468 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1469 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
1470 *(u8 *) data);
1471
1472 break;
1473
1474 case sw_op:
1475 bytes = 4;
1476 if (bytes > sizeof(run->mmio.data)) {
1477 kvm_err("%s: bad MMIO length: %d\n", __func__,
1478 run->mmio.len);
1479 }
1480 run->mmio.phys_addr =
1481 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1482 host_cp0_badvaddr);
1483 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1484 er = EMULATE_FAIL;
1485 break;
1486 }
1487
1488 run->mmio.len = bytes;
1489 run->mmio.is_write = 1;
1490 vcpu->mmio_needed = 1;
1491 vcpu->mmio_is_write = 1;
1492 *(u32 *) data = vcpu->arch.gprs[rt];
1493
1494 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1495 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1496 vcpu->arch.gprs[rt], *(u32 *) data);
1497 break;
1498
1499 case sh_op:
1500 bytes = 2;
1501 if (bytes > sizeof(run->mmio.data)) {
1502 kvm_err("%s: bad MMIO length: %d\n", __func__,
1503 run->mmio.len);
1504 }
1505 run->mmio.phys_addr =
1506 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1507 host_cp0_badvaddr);
1508 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1509 er = EMULATE_FAIL;
1510 break;
1511 }
1512
1513 run->mmio.len = bytes;
1514 run->mmio.is_write = 1;
1515 vcpu->mmio_needed = 1;
1516 vcpu->mmio_is_write = 1;
1517 *(u16 *) data = vcpu->arch.gprs[rt];
1518
1519 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1520 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1521 vcpu->arch.gprs[rt], *(u32 *) data);
1522 break;
1523
1524 default:
1525 kvm_err("Store not yet supported (inst=0x%08x)\n",
1526 inst.word);
1527 er = EMULATE_FAIL;
1528 break;
1529 }
1530
1531 /* Rollback PC if emulation was unsuccessful */
1532 if (er == EMULATE_FAIL)
1533 vcpu->arch.pc = curr_pc;
1534
1535 return er;
1536 }
1537
1538 enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1539 u32 cause, struct kvm_run *run,
1540 struct kvm_vcpu *vcpu)
1541 {
1542 enum emulation_result er = EMULATE_DO_MMIO;
1543 unsigned long curr_pc;
1544 u32 op, rt;
1545 u32 bytes;
1546
1547 rt = inst.i_format.rt;
1548 op = inst.i_format.opcode;
1549
1550 /*
1551 * Find the resume PC now while we have safe and easy access to the
1552 * prior branch instruction, and save it for
1553 * kvm_mips_complete_mmio_load() to restore later.
1554 */
1555 curr_pc = vcpu->arch.pc;
1556 er = update_pc(vcpu, cause);
1557 if (er == EMULATE_FAIL)
1558 return er;
1559 vcpu->arch.io_pc = vcpu->arch.pc;
1560 vcpu->arch.pc = curr_pc;
1561
1562 vcpu->arch.io_gpr = rt;
1563
1564 switch (op) {
1565 case lw_op:
1566 bytes = 4;
1567 if (bytes > sizeof(run->mmio.data)) {
1568 kvm_err("%s: bad MMIO length: %d\n", __func__,
1569 run->mmio.len);
1570 er = EMULATE_FAIL;
1571 break;
1572 }
1573 run->mmio.phys_addr =
1574 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1575 host_cp0_badvaddr);
1576 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1577 er = EMULATE_FAIL;
1578 break;
1579 }
1580
1581 run->mmio.len = bytes;
1582 run->mmio.is_write = 0;
1583 vcpu->mmio_needed = 1;
1584 vcpu->mmio_is_write = 0;
1585 break;
1586
1587 case lh_op:
1588 case lhu_op:
1589 bytes = 2;
1590 if (bytes > sizeof(run->mmio.data)) {
1591 kvm_err("%s: bad MMIO length: %d\n", __func__,
1592 run->mmio.len);
1593 er = EMULATE_FAIL;
1594 break;
1595 }
1596 run->mmio.phys_addr =
1597 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1598 host_cp0_badvaddr);
1599 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1600 er = EMULATE_FAIL;
1601 break;
1602 }
1603
1604 run->mmio.len = bytes;
1605 run->mmio.is_write = 0;
1606 vcpu->mmio_needed = 1;
1607 vcpu->mmio_is_write = 0;
1608
1609 if (op == lh_op)
1610 vcpu->mmio_needed = 2;
1611 else
1612 vcpu->mmio_needed = 1;
1613
1614 break;
1615
1616 case lbu_op:
1617 case lb_op:
1618 bytes = 1;
1619 if (bytes > sizeof(run->mmio.data)) {
1620 kvm_err("%s: bad MMIO length: %d\n", __func__,
1621 run->mmio.len);
1622 er = EMULATE_FAIL;
1623 break;
1624 }
1625 run->mmio.phys_addr =
1626 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1627 host_cp0_badvaddr);
1628 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1629 er = EMULATE_FAIL;
1630 break;
1631 }
1632
1633 run->mmio.len = bytes;
1634 run->mmio.is_write = 0;
1635 vcpu->mmio_is_write = 0;
1636
1637 if (op == lb_op)
1638 vcpu->mmio_needed = 2;
1639 else
1640 vcpu->mmio_needed = 1;
1641
1642 break;
1643
1644 default:
1645 kvm_err("Load not yet supported (inst=0x%08x)\n",
1646 inst.word);
1647 er = EMULATE_FAIL;
1648 break;
1649 }
1650
1651 return er;
1652 }
1653
1654 enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
1655 u32 *opc, u32 cause,
1656 struct kvm_run *run,
1657 struct kvm_vcpu *vcpu)
1658 {
1659 struct mips_coproc *cop0 = vcpu->arch.cop0;
1660 enum emulation_result er = EMULATE_DONE;
1661 u32 cache, op_inst, op, base;
1662 s16 offset;
1663 struct kvm_vcpu_arch *arch = &vcpu->arch;
1664 unsigned long va;
1665 unsigned long curr_pc;
1666
1667 /*
1668 * Update PC and hold onto current PC in case there is
1669 * an error and we want to rollback the PC
1670 */
1671 curr_pc = vcpu->arch.pc;
1672 er = update_pc(vcpu, cause);
1673 if (er == EMULATE_FAIL)
1674 return er;
1675
1676 base = inst.i_format.rs;
1677 op_inst = inst.i_format.rt;
1678 if (cpu_has_mips_r6)
1679 offset = inst.spec3_format.simmediate;
1680 else
1681 offset = inst.i_format.simmediate;
1682 cache = op_inst & CacheOp_Cache;
1683 op = op_inst & CacheOp_Op;
1684
1685 va = arch->gprs[base] + offset;
1686
1687 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1688 cache, op, base, arch->gprs[base], offset);
1689
1690 /*
1691 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
1692 * invalidate the caches entirely by stepping through all the
1693 * ways/indexes
1694 */
1695 if (op == Index_Writeback_Inv) {
1696 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1697 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1698 arch->gprs[base], offset);
1699
1700 if (cache == Cache_D)
1701 r4k_blast_dcache();
1702 else if (cache == Cache_I)
1703 r4k_blast_icache();
1704 else {
1705 kvm_err("%s: unsupported CACHE INDEX operation\n",
1706 __func__);
1707 return EMULATE_FAIL;
1708 }
1709
1710 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1711 kvm_mips_trans_cache_index(inst, opc, vcpu);
1712 #endif
1713 goto done;
1714 }
1715
1716 preempt_disable();
1717 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1718 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
1719 kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
1720 kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
1721 __func__, va, vcpu, read_c0_entryhi());
1722 er = EMULATE_FAIL;
1723 preempt_enable();
1724 goto done;
1725 }
1726 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1727 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1728 int index;
1729
1730 /* If an entry already exists then skip */
1731 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
1732 goto skip_fault;
1733
1734 /*
1735 * If the address is not in the guest TLB, then give the guest a fault;
1736 * the resulting handler will do the right thing
1737 */
1738 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
1739 (kvm_read_c0_guest_entryhi
1740 (cop0) & KVM_ENTRYHI_ASID));
1741
1742 if (index < 0) {
1743 vcpu->arch.host_cp0_badvaddr = va;
1744 vcpu->arch.pc = curr_pc;
1745 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1746 vcpu);
1747 preempt_enable();
1748 goto dont_update_pc;
1749 } else {
1750 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1751 /*
1752 * Check if the entry is valid, if not then setup a TLB
1753 * invalid exception to the guest
1754 */
1755 if (!TLB_IS_VALID(*tlb, va)) {
1756 vcpu->arch.host_cp0_badvaddr = va;
1757 vcpu->arch.pc = curr_pc;
1758 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1759 run, vcpu);
1760 preempt_enable();
1761 goto dont_update_pc;
1762 }
1763 /*
1764 * We fault an entry from the guest tlb to the
1765 * shadow host TLB
1766 */
1767 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
1768 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
1769 __func__, va, index, vcpu,
1770 read_c0_entryhi());
1771 er = EMULATE_FAIL;
1772 preempt_enable();
1773 goto done;
1774 }
1775 }
1776 } else {
1777 kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1778 cache, op, base, arch->gprs[base], offset);
1779 er = EMULATE_FAIL;
1780 preempt_enable();
1781 goto done;
1782
1783 }
1784
1785 skip_fault:
1786 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1787 if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
1788 flush_dcache_line(va);
1789
1790 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1791 /*
1792 * Replace the CACHE instruction, with a SYNCI, not the same,
1793 * but avoids a trap
1794 */
1795 kvm_mips_trans_cache_va(inst, opc, vcpu);
1796 #endif
1797 } else if (op_inst == Hit_Invalidate_I) {
1798 flush_dcache_line(va);
1799 flush_icache_line(va);
1800
1801 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1802 /* Replace the CACHE instruction, with a SYNCI */
1803 kvm_mips_trans_cache_va(inst, opc, vcpu);
1804 #endif
1805 } else {
1806 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1807 cache, op, base, arch->gprs[base], offset);
1808 er = EMULATE_FAIL;
1809 }
1810
1811 preempt_enable();
1812 done:
1813 /* Rollback PC only if emulation was unsuccessful */
1814 if (er == EMULATE_FAIL)
1815 vcpu->arch.pc = curr_pc;
1816
1817 dont_update_pc:
1818 /*
1819 * This is for exceptions whose emulation updates the PC, so do not
1820 * overwrite the PC under any circumstances
1821 */
1822
1823 return er;
1824 }
1825
1826 enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
1827 struct kvm_run *run,
1828 struct kvm_vcpu *vcpu)
1829 {
1830 union mips_instruction inst;
1831 enum emulation_result er = EMULATE_DONE;
1832
1833 /* Fetch the instruction. */
1834 if (cause & CAUSEF_BD)
1835 opc += 1;
1836
1837 inst.word = kvm_get_inst(opc, vcpu);
1838
1839 switch (inst.r_format.opcode) {
1840 case cop0_op:
1841 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1842 break;
1843 case sb_op:
1844 case sh_op:
1845 case sw_op:
1846 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1847 break;
1848 case lb_op:
1849 case lbu_op:
1850 case lhu_op:
1851 case lh_op:
1852 case lw_op:
1853 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1854 break;
1855
1856 #ifndef CONFIG_CPU_MIPSR6
1857 case cache_op:
1858 ++vcpu->stat.cache_exits;
1859 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1860 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1861 break;
1862 #else
1863 case spec3_op:
1864 switch (inst.spec3_format.func) {
1865 case cache6_op:
1866 ++vcpu->stat.cache_exits;
1867 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1868 er = kvm_mips_emulate_cache(inst, opc, cause, run,
1869 vcpu);
1870 break;
1871 default:
1872 goto unknown;
1873 };
1874 break;
1875 unknown:
1876 #endif
1877
1878 default:
1879 kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
1880 inst.word);
1881 kvm_arch_vcpu_dump_regs(vcpu);
1882 er = EMULATE_FAIL;
1883 break;
1884 }
1885
1886 return er;
1887 }
1888
1889 enum emulation_result kvm_mips_emulate_syscall(u32 cause,
1890 u32 *opc,
1891 struct kvm_run *run,
1892 struct kvm_vcpu *vcpu)
1893 {
1894 struct mips_coproc *cop0 = vcpu->arch.cop0;
1895 struct kvm_vcpu_arch *arch = &vcpu->arch;
1896 enum emulation_result er = EMULATE_DONE;
1897
1898 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1899 /* save old pc */
1900 kvm_write_c0_guest_epc(cop0, arch->pc);
1901 kvm_set_c0_guest_status(cop0, ST0_EXL);
1902
1903 if (cause & CAUSEF_BD)
1904 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1905 else
1906 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1907
1908 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1909
1910 kvm_change_c0_guest_cause(cop0, (0xff),
1911 (EXCCODE_SYS << CAUSEB_EXCCODE));
1912
1913 /* Set PC to the exception entry point */
1914 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1915
1916 } else {
1917 kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
1918 er = EMULATE_FAIL;
1919 }
1920
1921 return er;
1922 }
1923
1924 enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
1925 u32 *opc,
1926 struct kvm_run *run,
1927 struct kvm_vcpu *vcpu)
1928 {
1929 struct mips_coproc *cop0 = vcpu->arch.cop0;
1930 struct kvm_vcpu_arch *arch = &vcpu->arch;
1931 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
1932 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1933
1934 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1935 /* save old pc */
1936 kvm_write_c0_guest_epc(cop0, arch->pc);
1937 kvm_set_c0_guest_status(cop0, ST0_EXL);
1938
1939 if (cause & CAUSEF_BD)
1940 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1941 else
1942 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1943
1944 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1945 arch->pc);
1946
1947 /* set pc to the exception entry point */
1948 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1949
1950 } else {
1951 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1952 arch->pc);
1953
1954 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1955 }
1956
1957 kvm_change_c0_guest_cause(cop0, (0xff),
1958 (EXCCODE_TLBL << CAUSEB_EXCCODE));
1959
1960 /* setup badvaddr, context and entryhi registers for the guest */
1961 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1962 /* XXXKYMA: is the context register used by linux??? */
1963 kvm_write_c0_guest_entryhi(cop0, entryhi);
1964 /* Blow away the shadow host TLBs */
1965 kvm_mips_flush_host_tlb(1);
1966
1967 return EMULATE_DONE;
1968 }
1969
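/*
 * TLB invalid on a load/fetch: the guest TLB has a matching entry but it is
 * not valid. Unlike the refill case, this always uses the general exception
 * vector at KVM_GUEST_KSEG0 + 0x180, with ExcCode TLBL.
 */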
1970 enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
1971 u32 *opc,
1972 struct kvm_run *run,
1973 struct kvm_vcpu *vcpu)
1974 {
1975 struct mips_coproc *cop0 = vcpu->arch.cop0;
1976 struct kvm_vcpu_arch *arch = &vcpu->arch;
1977 unsigned long entryhi =
1978 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1979 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1980
1981 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1982 /* save old pc */
1983 kvm_write_c0_guest_epc(cop0, arch->pc);
1984 kvm_set_c0_guest_status(cop0, ST0_EXL);
1985
1986 if (cause & CAUSEF_BD)
1987 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1988 else
1989 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1990
1991 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1992 arch->pc);
1993
1994 /* set pc to the exception entry point */
1995 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1996
1997 } else {
1998 kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
1999 arch->pc);
2000 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2001 }
2002
2003 kvm_change_c0_guest_cause(cop0, (0xff),
2004 (EXCCODE_TLBL << CAUSEB_EXCCODE));
2005
2006 /* setup badvaddr, context and entryhi registers for the guest */
2007 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2008 /* XXXKYMA: is the context register used by linux??? */
2009 kvm_write_c0_guest_entryhi(cop0, entryhi);
2010 /* Blow away the shadow host TLBs */
2011 kvm_mips_flush_host_tlb(1);
2012
2013 return EMULATE_DONE;
2014 }
2015
2016 enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
2017 u32 *opc,
2018 struct kvm_run *run,
2019 struct kvm_vcpu *vcpu)
2020 {
2021 struct mips_coproc *cop0 = vcpu->arch.cop0;
2022 struct kvm_vcpu_arch *arch = &vcpu->arch;
2023 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2024 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2025
2026 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2027 /* save old pc */
2028 kvm_write_c0_guest_epc(cop0, arch->pc);
2029 kvm_set_c0_guest_status(cop0, ST0_EXL);
2030
2031 if (cause & CAUSEF_BD)
2032 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2033 else
2034 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2035
2036 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
2037 arch->pc);
2038
2039 /* Set PC to the exception entry point */
2040 arch->pc = KVM_GUEST_KSEG0 + 0x0;
2041 } else {
2042 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
2043 arch->pc);
2044 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2045 }
2046
2047 kvm_change_c0_guest_cause(cop0, (0xff),
2048 (EXCCODE_TLBS << CAUSEB_EXCCODE));
2049
2050 /* setup badvaddr, context and entryhi registers for the guest */
2051 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2052 /* XXXKYMA: is the context register used by linux??? */
2053 kvm_write_c0_guest_entryhi(cop0, entryhi);
2054 /* Blow away the shadow host TLBs */
2055 kvm_mips_flush_host_tlb(1);
2056
2057 return EMULATE_DONE;
2058 }
2059
2060 enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
2061 u32 *opc,
2062 struct kvm_run *run,
2063 struct kvm_vcpu *vcpu)
2064 {
2065 struct mips_coproc *cop0 = vcpu->arch.cop0;
2066 struct kvm_vcpu_arch *arch = &vcpu->arch;
2067 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2068 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2069
2070 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2071 /* save old pc */
2072 kvm_write_c0_guest_epc(cop0, arch->pc);
2073 kvm_set_c0_guest_status(cop0, ST0_EXL);
2074
2075 if (cause & CAUSEF_BD)
2076 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2077 else
2078 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2079
2080 kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
2081 arch->pc);
2082
2083 /* Set PC to the exception entry point */
2084 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2085 } else {
2086 kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
2087 arch->pc);
2088 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2089 }
2090
2091 kvm_change_c0_guest_cause(cop0, (0xff),
2092 (EXCCODE_TLBS << CAUSEB_EXCCODE));
2093
2094 /* setup badvaddr, context and entryhi registers for the guest */
2095 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2096 /* XXXKYMA: is the context register used by linux??? */
2097 kvm_write_c0_guest_entryhi(cop0, entryhi);
2098 /* Blow away the shadow host TLBs */
2099 kvm_mips_flush_host_tlb(1);
2100
2101 return EMULATE_DONE;
2102 }
2103
2104 /* TLBMOD: store into address matching TLB with Dirty bit off */
2105 enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
2106 struct kvm_run *run,
2107 struct kvm_vcpu *vcpu)
2108 {
2109 enum emulation_result er = EMULATE_DONE;
2110 #ifdef DEBUG
2111 struct mips_coproc *cop0 = vcpu->arch.cop0;
2112 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2113 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2114 bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);
2115 int index;
2116
2117 /* If address not in the guest TLB, then we are in trouble */
2118 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
2119 if (index < 0) {
2120 /* XXXKYMA Invalidate and retry */
2121 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr,
2122 !kernel, kernel);
2123 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
2124 __func__, entryhi);
2125 kvm_mips_dump_guest_tlbs(vcpu);
2126 kvm_mips_dump_host_tlbs();
2127 return EMULATE_FAIL;
2128 }
2129 #endif
2130
2131 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
2132 return er;
2133 }
2134
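/*
 * Deliver a TLB Modified exception (ExcCode MOD) to the guest via the general
 * vector at KVM_GUEST_KSEG0 + 0x180: the matching guest TLB entry is valid
 * but its Dirty bit is clear, so the write is reflected to the guest kernel
 * to handle.
 */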
2135 enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
2136 u32 *opc,
2137 struct kvm_run *run,
2138 struct kvm_vcpu *vcpu)
2139 {
2140 struct mips_coproc *cop0 = vcpu->arch.cop0;
2141 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2142 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2143 struct kvm_vcpu_arch *arch = &vcpu->arch;
2144
2145 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2146 /* save old pc */
2147 kvm_write_c0_guest_epc(cop0, arch->pc);
2148 kvm_set_c0_guest_status(cop0, ST0_EXL);
2149
2150 if (cause & CAUSEF_BD)
2151 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2152 else
2153 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2154
2155 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2156 arch->pc);
2157
2158 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2159 } else {
2160 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2161 arch->pc);
2162 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2163 }
2164
2165 kvm_change_c0_guest_cause(cop0, (0xff),
2166 (EXCCODE_MOD << CAUSEB_EXCCODE));
2167
2168 /* setup badvaddr, context and entryhi registers for the guest */
2169 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2170 /* XXXKYMA: is the context register used by linux??? */
2171 kvm_write_c0_guest_entryhi(cop0, entryhi);
2172 /* Blow away the shadow host TLBs */
2173 kvm_mips_flush_host_tlb(1);
2174
2175 return EMULATE_DONE;
2176 }
2177
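/*
 * Deliver a Coprocessor Unusable exception for the FPU (ExcCode CpU with
 * Cause.CE = 1). Note that, unlike the other helpers here, this one updates
 * the guest PC and Cause even when Status.EXL is already set.
 */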
2178 enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
2179 u32 *opc,
2180 struct kvm_run *run,
2181 struct kvm_vcpu *vcpu)
2182 {
2183 struct mips_coproc *cop0 = vcpu->arch.cop0;
2184 struct kvm_vcpu_arch *arch = &vcpu->arch;
2185
2186 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2187 /* save old pc */
2188 kvm_write_c0_guest_epc(cop0, arch->pc);
2189 kvm_set_c0_guest_status(cop0, ST0_EXL);
2190
2191 if (cause & CAUSEF_BD)
2192 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2193 else
2194 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2195
2196 }
2197
2198 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2199
2200 kvm_change_c0_guest_cause(cop0, (0xff),
2201 (EXCCODE_CPU << CAUSEB_EXCCODE));
2202 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2203
2204 return EMULATE_DONE;
2205 }
2206
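/*
 * The RI, BP, TRAP, MSAFPE, FPE and MSADIS helpers that follow all use the
 * same delivery sequence, differing only in the ExcCode they set; each
 * refuses to deliver (EMULATE_FAIL) if guest Status.EXL is already set.
 */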
2207 enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
2208 u32 *opc,
2209 struct kvm_run *run,
2210 struct kvm_vcpu *vcpu)
2211 {
2212 struct mips_coproc *cop0 = vcpu->arch.cop0;
2213 struct kvm_vcpu_arch *arch = &vcpu->arch;
2214 enum emulation_result er = EMULATE_DONE;
2215
2216 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2217 /* save old pc */
2218 kvm_write_c0_guest_epc(cop0, arch->pc);
2219 kvm_set_c0_guest_status(cop0, ST0_EXL);
2220
2221 if (cause & CAUSEF_BD)
2222 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2223 else
2224 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2225
2226 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2227
2228 kvm_change_c0_guest_cause(cop0, (0xff),
2229 (EXCCODE_RI << CAUSEB_EXCCODE));
2230
2231 /* Set PC to the exception entry point */
2232 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2233
2234 } else {
2235 kvm_err("Trying to deliver RI when EXL is already set\n");
2236 er = EMULATE_FAIL;
2237 }
2238
2239 return er;
2240 }
2241
2242 enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
2243 u32 *opc,
2244 struct kvm_run *run,
2245 struct kvm_vcpu *vcpu)
2246 {
2247 struct mips_coproc *cop0 = vcpu->arch.cop0;
2248 struct kvm_vcpu_arch *arch = &vcpu->arch;
2249 enum emulation_result er = EMULATE_DONE;
2250
2251 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2252 /* save old pc */
2253 kvm_write_c0_guest_epc(cop0, arch->pc);
2254 kvm_set_c0_guest_status(cop0, ST0_EXL);
2255
2256 if (cause & CAUSEF_BD)
2257 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2258 else
2259 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2260
2261 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2262
2263 kvm_change_c0_guest_cause(cop0, (0xff),
2264 (EXCCODE_BP << CAUSEB_EXCCODE));
2265
2266 /* Set PC to the exception entry point */
2267 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2268
2269 } else {
2270 kvm_err("Trying to deliver BP when EXL is already set\n");
2271 er = EMULATE_FAIL;
2272 }
2273
2274 return er;
2275 }
2276
2277 enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
2278 u32 *opc,
2279 struct kvm_run *run,
2280 struct kvm_vcpu *vcpu)
2281 {
2282 struct mips_coproc *cop0 = vcpu->arch.cop0;
2283 struct kvm_vcpu_arch *arch = &vcpu->arch;
2284 enum emulation_result er = EMULATE_DONE;
2285
2286 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2287 /* save old pc */
2288 kvm_write_c0_guest_epc(cop0, arch->pc);
2289 kvm_set_c0_guest_status(cop0, ST0_EXL);
2290
2291 if (cause & CAUSEF_BD)
2292 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2293 else
2294 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2295
2296 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2297
2298 kvm_change_c0_guest_cause(cop0, (0xff),
2299 (EXCCODE_TR << CAUSEB_EXCCODE));
2300
2301 /* Set PC to the exception entry point */
2302 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2303
2304 } else {
2305 kvm_err("Trying to deliver TRAP when EXL is already set\n");
2306 er = EMULATE_FAIL;
2307 }
2308
2309 return er;
2310 }
2311
2312 enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
2313 u32 *opc,
2314 struct kvm_run *run,
2315 struct kvm_vcpu *vcpu)
2316 {
2317 struct mips_coproc *cop0 = vcpu->arch.cop0;
2318 struct kvm_vcpu_arch *arch = &vcpu->arch;
2319 enum emulation_result er = EMULATE_DONE;
2320
2321 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2322 /* save old pc */
2323 kvm_write_c0_guest_epc(cop0, arch->pc);
2324 kvm_set_c0_guest_status(cop0, ST0_EXL);
2325
2326 if (cause & CAUSEF_BD)
2327 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2328 else
2329 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2330
2331 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2332
2333 kvm_change_c0_guest_cause(cop0, (0xff),
2334 (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
2335
2336 /* Set PC to the exception entry point */
2337 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2338
2339 } else {
2340 kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2341 er = EMULATE_FAIL;
2342 }
2343
2344 return er;
2345 }
2346
2347 enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
2348 u32 *opc,
2349 struct kvm_run *run,
2350 struct kvm_vcpu *vcpu)
2351 {
2352 struct mips_coproc *cop0 = vcpu->arch.cop0;
2353 struct kvm_vcpu_arch *arch = &vcpu->arch;
2354 enum emulation_result er = EMULATE_DONE;
2355
2356 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2357 /* save old pc */
2358 kvm_write_c0_guest_epc(cop0, arch->pc);
2359 kvm_set_c0_guest_status(cop0, ST0_EXL);
2360
2361 if (cause & CAUSEF_BD)
2362 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2363 else
2364 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2365
2366 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2367
2368 kvm_change_c0_guest_cause(cop0, (0xff),
2369 (EXCCODE_FPE << CAUSEB_EXCCODE));
2370
2371 /* Set PC to the exception entry point */
2372 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2373
2374 } else {
2375 kvm_err("Trying to deliver FPE when EXL is already set\n");
2376 er = EMULATE_FAIL;
2377 }
2378
2379 return er;
2380 }
2381
2382 enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
2383 u32 *opc,
2384 struct kvm_run *run,
2385 struct kvm_vcpu *vcpu)
2386 {
2387 struct mips_coproc *cop0 = vcpu->arch.cop0;
2388 struct kvm_vcpu_arch *arch = &vcpu->arch;
2389 enum emulation_result er = EMULATE_DONE;
2390
2391 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2392 /* save old pc */
2393 kvm_write_c0_guest_epc(cop0, arch->pc);
2394 kvm_set_c0_guest_status(cop0, ST0_EXL);
2395
2396 if (cause & CAUSEF_BD)
2397 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2398 else
2399 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2400
2401 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
2402
2403 kvm_change_c0_guest_cause(cop0, (0xff),
2404 (EXCCODE_MSADIS << CAUSEB_EXCCODE));
2405
2406 /* Set PC to the exception entry point */
2407 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2408
2409 } else {
2410 kvm_err("Trying to deliver MSADIS when EXL is already set\n");
2411 er = EMULATE_FAIL;
2412 }
2413
2414 return er;
2415 }
2416
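/*
 * Handle a Reserved Instruction exception taken by the guest. The only
 * instruction emulated here is RDHWR: the recognised registers
 * (MIPS_HWR_CPUNUM, MIPS_HWR_SYNCISTEP, MIPS_HWR_CC, MIPS_HWR_CCRES and
 * MIPS_HWR_ULR) are emulated directly, subject to the guest HWREna mask when
 * the guest is in user mode; anything else results in an RI exception being
 * delivered to the guest instead.
 */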
2417 enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
2418 struct kvm_run *run,
2419 struct kvm_vcpu *vcpu)
2420 {
2421 struct mips_coproc *cop0 = vcpu->arch.cop0;
2422 struct kvm_vcpu_arch *arch = &vcpu->arch;
2423 enum emulation_result er = EMULATE_DONE;
2424 unsigned long curr_pc;
2425 union mips_instruction inst;
2426
2427 /*
2428 * Update PC and hold onto current PC in case there is
2429 * an error and we want to rollback the PC
2430 */
2431 curr_pc = vcpu->arch.pc;
2432 er = update_pc(vcpu, cause);
2433 if (er == EMULATE_FAIL)
2434 return er;
2435
2436 /* Fetch the instruction. */
2437 if (cause & CAUSEF_BD)
2438 opc += 1;
2439
2440 inst.word = kvm_get_inst(opc, vcpu);
2441
2442 if (inst.word == KVM_INVALID_INST) {
2443 kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
2444 return EMULATE_FAIL;
2445 }
2446
2447 if (inst.r_format.opcode == spec3_op &&
2448 inst.r_format.func == rdhwr_op &&
2449 inst.r_format.rs == 0 &&
2450 (inst.r_format.re >> 3) == 0) {
2451 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2452 int rd = inst.r_format.rd;
2453 int rt = inst.r_format.rt;
2454 int sel = inst.r_format.re & 0x7;
2455
2456 /* If usermode, check RDHWR rd is allowed by guest HWREna */
2457 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2458 kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2459 rd, opc);
2460 goto emulate_ri;
2461 }
2462 switch (rd) {
2463 case MIPS_HWR_CPUNUM: /* CPU number */
2464 arch->gprs[rt] = vcpu->vcpu_id;
2465 break;
2466 case MIPS_HWR_SYNCISTEP: /* SYNCI length */
2467 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2468 current_cpu_data.icache.linesz);
2469 break;
2470 case MIPS_HWR_CC: /* Read count register */
2471 arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
2472 break;
2473 case MIPS_HWR_CCRES: /* Count register resolution */
2474 switch (current_cpu_data.cputype) {
2475 case CPU_20KC:
2476 case CPU_25KF:
2477 arch->gprs[rt] = 1;
2478 break;
2479 default:
2480 arch->gprs[rt] = 2;
2481 }
2482 break;
2483 case MIPS_HWR_ULR: /* Read UserLocal register */
2484 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
2485 break;
2486
2487 default:
2488 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
2489 goto emulate_ri;
2490 }
2491
2492 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
2493 vcpu->arch.gprs[rt]);
2494 } else {
2495 kvm_debug("Emulate RI not supported @ %p: %#x\n",
2496 opc, inst.word);
2497 goto emulate_ri;
2498 }
2499
2500 return EMULATE_DONE;
2501
2502 emulate_ri:
2503 /*
2504 * Rollback PC (if in branch delay slot then the PC already points to
2505 * branch target), and pass the RI exception to the guest OS.
2506 */
2507 vcpu->arch.pc = curr_pc;
2508 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
2509 }
2510
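/*
 * Complete an MMIO load once userspace has filled in run->mmio.data: restore
 * the resume PC saved in vcpu->arch.io_pc, then copy the data into the
 * destination GPR, sign-extending when vcpu->mmio_needed == 2 (signed loads)
 * and zero-extending otherwise.
 */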
2511 enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2512 struct kvm_run *run)
2513 {
2514 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2515 enum emulation_result er = EMULATE_DONE;
2516
2517 if (run->mmio.len > sizeof(*gpr)) {
2518 kvm_err("Bad MMIO length: %d\n", run->mmio.len);
2519 er = EMULATE_FAIL;
2520 goto done;
2521 }
2522
2523 /* Restore saved resume PC */
2524 vcpu->arch.pc = vcpu->arch.io_pc;
2525
2526 switch (run->mmio.len) {
2527 case 4:
2528 *gpr = *(s32 *) run->mmio.data;
2529 break;
2530
2531 case 2:
2532 if (vcpu->mmio_needed == 2)
2533 *gpr = *(s16 *) run->mmio.data;
2534 else
2535 *gpr = *(u16 *)run->mmio.data;
2536
2537 break;
2538 case 1:
2539 if (vcpu->mmio_needed == 2)
2540 *gpr = *(s8 *) run->mmio.data;
2541 else
2542 *gpr = *(u8 *) run->mmio.data;
2543 break;
2544 }
2545
2546 done:
2547 return er;
2548 }
2549
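/*
 * Generic guest exception delivery: used by kvm_mips_check_privilege() below
 * to reflect privilege faults into the guest, with the ExcCode taken from the
 * (possibly rewritten) cause value.
 */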
2550 static enum emulation_result kvm_mips_emulate_exc(u32 cause,
2551 u32 *opc,
2552 struct kvm_run *run,
2553 struct kvm_vcpu *vcpu)
2554 {
2555 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2556 struct mips_coproc *cop0 = vcpu->arch.cop0;
2557 struct kvm_vcpu_arch *arch = &vcpu->arch;
2558 enum emulation_result er = EMULATE_DONE;
2559
2560 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2561 /* save old pc */
2562 kvm_write_c0_guest_epc(cop0, arch->pc);
2563 kvm_set_c0_guest_status(cop0, ST0_EXL);
2564
2565 if (cause & CAUSEF_BD)
2566 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2567 else
2568 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2569
2570 kvm_change_c0_guest_cause(cop0, (0xff),
2571 (exccode << CAUSEB_EXCCODE));
2572
2573 /* Set PC to the exception entry point */
2574 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2575 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2576
2577 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2578 exccode, kvm_read_c0_guest_epc(cop0),
2579 kvm_read_c0_guest_badvaddr(cop0));
2580 } else {
2581 kvm_err("Trying to deliver EXC when EXL is already set\n");
2582 er = EMULATE_FAIL;
2583 }
2584
2585 return er;
2586 }
2587
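/*
 * Check whether the guest was permitted to take this exception from user
 * mode. Exceptions the guest may legitimately take from user mode
 * (interrupts, syscalls, breakpoints, traps, FPU/MSA exceptions) pass
 * through; TLB faults on guest kernel addresses are converted to address
 * errors, address errors on the commpage are converted back to TLB
 * exceptions, and any disallowed exception is reflected into the guest via
 * kvm_mips_emulate_exc().
 */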
2588 enum emulation_result kvm_mips_check_privilege(u32 cause,
2589 u32 *opc,
2590 struct kvm_run *run,
2591 struct kvm_vcpu *vcpu)
2592 {
2593 enum emulation_result er = EMULATE_DONE;
2594 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2595 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2596
2597 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2598
2599 if (usermode) {
2600 switch (exccode) {
2601 case EXCCODE_INT:
2602 case EXCCODE_SYS:
2603 case EXCCODE_BP:
2604 case EXCCODE_RI:
2605 case EXCCODE_TR:
2606 case EXCCODE_MSAFPE:
2607 case EXCCODE_FPE:
2608 case EXCCODE_MSADIS:
2609 break;
2610
2611 case EXCCODE_CPU:
2612 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2613 er = EMULATE_PRIV_FAIL;
2614 break;
2615
2616 case EXCCODE_MOD:
2617 break;
2618
2619 case EXCCODE_TLBL:
2620 /*
2621 * If we are accessing Guest kernel space, then send an
2622 * address error exception to the guest
2623 */
2624 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2625 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2626 badvaddr);
2627 cause &= ~0xff;
2628 cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
2629 er = EMULATE_PRIV_FAIL;
2630 }
2631 break;
2632
2633 case EXCCODE_TLBS:
2634 /*
2635 * If we are accessing Guest kernel space, then send an
2636 * address error exception to the guest
2637 */
2638 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2639 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2640 badvaddr);
2641 cause &= ~0xff;
2642 cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
2643 er = EMULATE_PRIV_FAIL;
2644 }
2645 break;
2646
2647 case EXCCODE_ADES:
2648 kvm_debug("%s: address error ST @ %#lx\n", __func__,
2649 badvaddr);
2650 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2651 cause &= ~0xff;
2652 cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
2653 }
2654 er = EMULATE_PRIV_FAIL;
2655 break;
2656 case EXCCODE_ADEL:
2657 kvm_debug("%s: address error LD @ %#lx\n", __func__,
2658 badvaddr);
2659 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2660 cause &= ~0xff;
2661 cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
2662 }
2663 er = EMULATE_PRIV_FAIL;
2664 break;
2665 default:
2666 er = EMULATE_PRIV_FAIL;
2667 break;
2668 }
2669 }
2670
2671 if (er == EMULATE_PRIV_FAIL)
2672 kvm_mips_emulate_exc(cause, opc, run, vcpu);
2673
2674 return er;
2675 }
2676
2677 /*
2678 * User Address (UA) fault. This can happen if:
2679 * (1) the TLB entry is not present/valid in both the Guest and shadow host TLBs;
2680 * in this case we pass the fault on to the guest kernel and let it handle it;
2681 * (2) the TLB entry is present in the Guest TLB but not in the shadow; in this
2682 * case we inject the entry from the Guest TLB into the shadow host TLB.
2683 */
2684 enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
2685 u32 *opc,
2686 struct kvm_run *run,
2687 struct kvm_vcpu *vcpu)
2688 {
2689 enum emulation_result er = EMULATE_DONE;
2690 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2691 unsigned long va = vcpu->arch.host_cp0_badvaddr;
2692 int index;
2693
2694 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
2695 vcpu->arch.host_cp0_badvaddr);
2696
2697 /*
2698 * KVM would not have got the exception if this entry was valid in the
2699 * shadow host TLB. Check the Guest TLB; if the entry is not there, then
2700 * send the guest an exception. The guest exc handler should then inject
2701 * an entry into the guest TLB.
2702 */
2703 index = kvm_mips_guest_tlb_lookup(vcpu,
2704 (va & VPN2_MASK) |
2705 (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
2706 KVM_ENTRYHI_ASID));
2707 if (index < 0) {
2708 if (exccode == EXCCODE_TLBL) {
2709 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2710 } else if (exccode == EXCCODE_TLBS) {
2711 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2712 } else {
2713 kvm_err("%s: invalid exc code: %d\n", __func__,
2714 exccode);
2715 er = EMULATE_FAIL;
2716 }
2717 } else {
2718 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2719
2720 /*
2721 * Check if the entry is valid, if not then setup a TLB invalid
2722 * exception to the guest
2723 */
2724 if (!TLB_IS_VALID(*tlb, va)) {
2725 if (exccode == EXCCODE_TLBL) {
2726 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2727 vcpu);
2728 } else if (exccode == EXCCODE_TLBS) {
2729 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2730 vcpu);
2731 } else {
2732 kvm_err("%s: invalid exc code: %d\n", __func__,
2733 exccode);
2734 er = EMULATE_FAIL;
2735 }
2736 } else {
2737 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2738 tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
2739 /*
2740 * OK we have a Guest TLB entry, now inject it into the
2741 * shadow host TLB
2742 */
2743 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
2744 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
2745 __func__, va, index, vcpu,
2746 read_c0_entryhi());
2747 er = EMULATE_FAIL;
2748 }
2749 }
2750 }
2751
2752 return er;
2753 }