#define CALLFRAME_SIZ 32
/*
 * CP0 (register, select) pairs of the scratch registers used to hold the
 * VCPU pointer (scratch_vcpu) and a temporary (scratch_tmp).  The
 * initializers rely on the C0_* macros expanding to a "reg, sel" pair,
 * which fills both array elements.  kvm_mips_entry_setup() replaces these
 * defaults with KScratch registers when the core provides them.
 */
static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };
+
enum label_id {
label_fpu_1 = 1,
label_msa_1,
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);
+/**
+ * kvm_mips_entry_setup() - Perform global setup for entry code.
+ *
+ * Perform global setup for entry code, such as choosing a scratch register.
+ *
+ * Returns: 0 on success.
+ * -errno on failure.
+ */
+int kvm_mips_entry_setup(void)
+{
+ /*
+ * We prefer to use KScratchN registers if they are available over the
+ * defaults above, which may not work on all cores.
+ */
+ unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc;
+
+ /* Pick a scratch register for storing VCPU */
+ if (kscratch_mask) {
+ scratch_vcpu[0] = 31;
+ scratch_vcpu[1] = ffs(kscratch_mask) - 1;
+ kscratch_mask &= ~BIT(scratch_vcpu[1]);
+ }
+
+ /* Pick a scratch register to use as a temp for saving state */
+ if (kscratch_mask) {
+ scratch_tmp[0] = 31;
+ scratch_tmp[1] = ffs(kscratch_mask) - 1;
+ kscratch_mask &= ~BIT(scratch_tmp[1]);
+ }
+
+ return 0;
+}
+
/**
 * kvm_mips_build_save_scratch() - Emit code saving host scratch registers.
 * @p:		Pointer to the code-buffer write pointer (advanced by uasm).
 * @tmp:	GPR number safe to clobber as a temporary.
 * @frame:	GPR number holding the base of a struct pt_regs stack frame.
 *
 * Generate instructions that copy the host's scratch CP0 register values
 * into otherwise-unused slots of the stack frame (cp0_epc / cp0_cause), so
 * kvm_mips_build_restore_scratch() can put them back on the way out.
 */
static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	/*
	 * NOTE(review): 32-bit mfc0 here, but UASM_i_SW adjusts for word
	 * size — confirm the scratch register never carries more than 32
	 * significant bits on 64-bit cores.
	 */
	uasm_i_mfc0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of stack frame */
	/*
	 * Only emitted when a KScratch register (CP0 reg 31) was chosen by
	 * kvm_mips_entry_setup(); presumably the fallback (C0_ERROREPC)
	 * does not need preserving for the host — TODO confirm.
	 */
	if (scratch_tmp[0] == 31) {
		uasm_i_mfc0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}
+
/**
 * kvm_mips_build_restore_scratch() - Emit code restoring host scratch regs.
 * @p:		Pointer to the code-buffer write pointer (advanced by uasm).
 * @tmp:	GPR number safe to clobber as a temporary.
 * @frame:	GPR number holding the base of the struct pt_regs stack frame
 *		previously filled by kvm_mips_build_save_scratch().
 *
 * Generate instructions that reload the host scratch CP0 register values
 * from the cp0_epc / cp0_cause slots of the stack frame and write them
 * back, mirroring kvm_mips_build_save_scratch() exactly.
 */
static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	/* VCPU scratch register was stashed in the cp0_epc frame slot */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	uasm_i_mtc0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	/*
	 * The temp scratch register is only restored when a KScratch
	 * register (CP0 reg 31) was chosen — matching the condition under
	 * which the save code stashed it in cp0_cause.
	 */
	if (scratch_tmp[0] == 31) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		uasm_i_mtc0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}
+
/**
* kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
* @addr: Address to start writing code.
uasm_i_mfc0(&p, V0, C0_STATUS);
UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);
- /* Save DDATA_LO, will be used to store pointer to vcpu */
- uasm_i_mfc0(&p, V1, C0_DDATA_LO);
- UASM_i_SW(&p, V1, offsetof(struct pt_regs, cp0_epc), K1);
+ /* Save scratch registers, will be used to store pointer to vcpu etc */
+ kvm_mips_build_save_scratch(&p, V1, K1);
- /* DDATA_LO has pointer to vcpu */
- uasm_i_mtc0(&p, A1, C0_DDATA_LO);
+ /* VCPU scratch register has pointer to vcpu */
+ uasm_i_mtc0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
/* Offset into vcpu->arch */
uasm_i_addiu(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
u32 *p = addr;
/* Save guest k0 */
- uasm_i_mtc0(&p, K0, C0_ERROREPC);
+ uasm_i_mtc0(&p, K0, scratch_tmp[0], scratch_tmp[1]);
uasm_i_ehb(&p);
/* Get EBASE */
* does something that causes a trap to kernel mode.
*/
- /* Get the VCPU pointer from DDATA_LO */
- uasm_i_mfc0(&p, K1, C0_DDATA_LO);
+ /* Get the VCPU pointer from the scratch register */
+ uasm_i_mfc0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
/* Start saving Guest context to VCPU */
UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
/* Finally save guest k0/k1 to VCPU */
- uasm_i_mfc0(&p, T0, C0_ERROREPC);
+ uasm_i_mfc0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
/* Get GUEST k1 and save it in VCPU */
/* Now that context has been saved, we can use other registers */
/* Restore vcpu */
- uasm_i_mfc0(&p, A1, C0_DDATA_LO);
+ uasm_i_mfc0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
uasm_i_move(&p, S1, A1);
/* Restore run (vcpu->run) */
* kernel entries are marked GLOBAL, need to verify
*/
- /* Restore host DDATA_LO */
- UASM_i_LW(&p, K0, offsetof(struct pt_regs, cp0_epc), SP);
- uasm_i_mtc0(&p, K0, C0_DDATA_LO);
+ /* Restore host scratch registers, as we'll have clobbered them */
+ kvm_mips_build_restore_scratch(&p, K0, SP);
/* Restore RDHWR access */
UASM_i_LA_mostly(&p, K0, (long)&hwrena);
{
u32 *p = addr;
- /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
- uasm_i_mtc0(&p, S1, C0_DDATA_LO);
+ /* Put the saved pointer to vcpu (s1) back into the scratch register */
+ uasm_i_mtc0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
/* Load up the Guest EBASE to minimize the window where BEV is set */
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);