Merge branch 'mti-next' of git://git.linux-mips.org/pub/scm/sjhill/linux-sjhill into...
author Ralf Baechle <ralf@linux-mips.org>
Thu, 9 May 2013 15:57:30 +0000 (17:57 +0200)
committer Ralf Baechle <ralf@linux-mips.org>
Thu, 9 May 2013 15:57:30 +0000 (17:57 +0200)
14 files changed:
arch/mips/Kconfig
arch/mips/include/asm/kvm_host.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/uaccess.h
arch/mips/kernel/Makefile
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/proc.c
arch/mips/kernel/traps.c
arch/mips/kvm/kvm_mips_emul.c
arch/mips/kvm/kvm_tlb.c
arch/mips/mm/c-r4k.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mti-malta/malta-time.c
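
The change that threads through most of these files is that ASID_MASK and ASID_INC stop being plain constants and become function-like macros, so call sites that used to apply the mask and increment with explicit operators now pass the value through the macro (compare the old and new lines in the mmu_context.h hunk below). A minimal sketch of the two styles, assuming an illustrative 8-bit ASID field; the real, CPU-dependent definitions live in arch/mips/include/asm/mmu_context.h:

	/*
	 * Illustrative only: assumes an 8-bit ASID field; the real definitions
	 * in arch/mips/include/asm/mmu_context.h are CPU-dependent.
	 */
	#define ASID_MASK(asid)  ((asid) & 0xffUL)
	#define ASID_INC(asid)   ((asid) + 0x1UL)

	/* Old form:  if (!((asid += ASID_INC) & ASID_MASK))     -- plain constants */
	/* New form:  if (!ASID_MASK((asid = ASID_INC(asid))))   -- this merge      */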

Simple merge
index 143875c6c95add1730f000ba326c7b2237f593a8,0000000000000000000000000000000000000000..e68781e183873b8ef40ae98157a4e6b260dd3cf9
mode 100644,000000..100644
--- /dev/null
@@@ -1,667 -1,0 +1,667 @@@
- #define TLB_ASID(x)         ((x).tlb_hi & ASID_MASK)
 +/*
 +* This file is subject to the terms and conditions of the GNU General Public
 +* License.  See the file "COPYING" in the main directory of this archive
 +* for more details.
 +*
 +* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 +* Authors: Sanjay Lal <sanjayl@kymasys.com>
 +*/
 +
 +#ifndef __MIPS_KVM_HOST_H__
 +#define __MIPS_KVM_HOST_H__
 +
 +#include <linux/mutex.h>
 +#include <linux/hrtimer.h>
 +#include <linux/interrupt.h>
 +#include <linux/types.h>
 +#include <linux/kvm.h>
 +#include <linux/kvm_types.h>
 +#include <linux/threads.h>
 +#include <linux/spinlock.h>
 +
 +
 +#define KVM_MAX_VCPUS         1
 +#define KVM_USER_MEM_SLOTS    8
 +/* memory slots that are not exposed to userspace */
 +#define KVM_PRIVATE_MEM_SLOTS         0
 +
 +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 +
 +/* Don't support huge pages */
 +#define KVM_HPAGE_GFN_SHIFT(x)        0
 +
 +/* We don't currently support large pages. */
 +#define KVM_NR_PAGE_SIZES     1
 +#define KVM_PAGES_PER_HPAGE(x)        1
 +
 +
 +
 +/* Special address that contains the comm page, used for reducing # of traps */
 +#define KVM_GUEST_COMMPAGE_ADDR     0x0
 +
 +#define KVM_GUEST_KERNEL_MODE(vcpu)   ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
 +                                      ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
 +
 +#define KVM_GUEST_KUSEG             0x00000000UL
 +#define KVM_GUEST_KSEG0             0x40000000UL
 +#define KVM_GUEST_KSEG23            0x60000000UL
 +#define KVM_GUEST_KSEGX(a)          ((_ACAST32_(a)) & 0x60000000)
 +#define KVM_GUEST_CPHYSADDR(a)      ((_ACAST32_(a)) & 0x1fffffff)
 +
 +#define KVM_GUEST_CKSEG0ADDR(a)               (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
 +#define KVM_GUEST_CKSEG1ADDR(a)               (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
 +#define KVM_GUEST_CKSEG23ADDR(a)      (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
 +
 +/*
 + * Map an address to a certain kernel segment
 + */
 +#define KVM_GUEST_KSEG0ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
 +#define KVM_GUEST_KSEG1ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
 +#define KVM_GUEST_KSEG23ADDR(a)               (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
 +
 +#define KVM_INVALID_PAGE            0xdeadbeef
 +#define KVM_INVALID_INST            0xdeadbeef
 +#define KVM_INVALID_ADDR            0xdeadbeef
 +
 +#define KVM_MALTA_GUEST_RTC_ADDR    0xb8000070UL
 +
 +#define GUEST_TICKS_PER_JIFFY (40000000/HZ)
 +#define MS_TO_NS(x) (x * 1E6L)
 +
 +#define CAUSEB_DC       27
 +#define CAUSEF_DC       (_ULCAST_(1)   << 27)
 +
 +struct kvm;
 +struct kvm_run;
 +struct kvm_vcpu;
 +struct kvm_interrupt;
 +
 +extern atomic_t kvm_mips_instance;
 +extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
 +extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
 +extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
 +
 +struct kvm_vm_stat {
 +      u32 remote_tlb_flush;
 +};
 +
 +struct kvm_vcpu_stat {
 +      u32 wait_exits;
 +      u32 cache_exits;
 +      u32 signal_exits;
 +      u32 int_exits;
 +      u32 cop_unusable_exits;
 +      u32 tlbmod_exits;
 +      u32 tlbmiss_ld_exits;
 +      u32 tlbmiss_st_exits;
 +      u32 addrerr_st_exits;
 +      u32 addrerr_ld_exits;
 +      u32 syscall_exits;
 +      u32 resvd_inst_exits;
 +      u32 break_inst_exits;
 +      u32 flush_dcache_exits;
 +      u32 halt_wakeup;
 +};
 +
 +enum kvm_mips_exit_types {
 +      WAIT_EXITS,
 +      CACHE_EXITS,
 +      SIGNAL_EXITS,
 +      INT_EXITS,
 +      COP_UNUSABLE_EXITS,
 +      TLBMOD_EXITS,
 +      TLBMISS_LD_EXITS,
 +      TLBMISS_ST_EXITS,
 +      ADDRERR_ST_EXITS,
 +      ADDRERR_LD_EXITS,
 +      SYSCALL_EXITS,
 +      RESVD_INST_EXITS,
 +      BREAK_INST_EXITS,
 +      FLUSH_DCACHE_EXITS,
 +      MAX_KVM_MIPS_EXIT_TYPES
 +};
 +
 +struct kvm_arch_memory_slot {
 +};
 +
 +struct kvm_arch {
 +      /* Guest GVA->HPA page table */
 +      unsigned long *guest_pmap;
 +      unsigned long guest_pmap_npages;
 +
 +      /* Wired host TLB used for the commpage */
 +      int commpage_tlb;
 +};
 +
 +#define N_MIPS_COPROC_REGS      32
 +#define N_MIPS_COPROC_SEL     8
 +
 +struct mips_coproc {
 +      unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
 +#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
 +      unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
 +#endif
 +};
 +
 +/*
 + * Coprocessor 0 register names
 + */
 +#define       MIPS_CP0_TLB_INDEX          0
 +#define       MIPS_CP0_TLB_RANDOM         1
 +#define       MIPS_CP0_TLB_LOW            2
 +#define       MIPS_CP0_TLB_LO0            2
 +#define       MIPS_CP0_TLB_LO1            3
 +#define       MIPS_CP0_TLB_CONTEXT    4
 +#define       MIPS_CP0_TLB_PG_MASK    5
 +#define       MIPS_CP0_TLB_WIRED          6
 +#define       MIPS_CP0_HWRENA             7
 +#define       MIPS_CP0_BAD_VADDR          8
 +#define       MIPS_CP0_COUNT          9
 +#define       MIPS_CP0_TLB_HI         10
 +#define       MIPS_CP0_COMPARE            11
 +#define       MIPS_CP0_STATUS         12
 +#define       MIPS_CP0_CAUSE          13
 +#define       MIPS_CP0_EXC_PC         14
 +#define       MIPS_CP0_PRID               15
 +#define       MIPS_CP0_CONFIG         16
 +#define       MIPS_CP0_LLADDR         17
 +#define       MIPS_CP0_WATCH_LO           18
 +#define       MIPS_CP0_WATCH_HI           19
 +#define       MIPS_CP0_TLB_XCONTEXT   20
 +#define       MIPS_CP0_ECC                26
 +#define       MIPS_CP0_CACHE_ERR          27
 +#define       MIPS_CP0_TAG_LO         28
 +#define       MIPS_CP0_TAG_HI         29
 +#define       MIPS_CP0_ERROR_PC           30
 +#define       MIPS_CP0_DEBUG          23
 +#define       MIPS_CP0_DEPC               24
 +#define       MIPS_CP0_PERFCNT            25
 +#define       MIPS_CP0_ERRCTL         26
 +#define       MIPS_CP0_DATA_LO            28
 +#define       MIPS_CP0_DATA_HI            29
 +#define       MIPS_CP0_DESAVE         31
 +
 +#define MIPS_CP0_CONFIG_SEL       0
 +#define MIPS_CP0_CONFIG1_SEL    1
 +#define MIPS_CP0_CONFIG2_SEL    2
 +#define MIPS_CP0_CONFIG3_SEL    3
 +
 +/* Config0 register bits */
 +#define CP0C0_M    31
 +#define CP0C0_K23  28
 +#define CP0C0_KU   25
 +#define CP0C0_MDU  20
 +#define CP0C0_MM   17
 +#define CP0C0_BM   16
 +#define CP0C0_BE   15
 +#define CP0C0_AT   13
 +#define CP0C0_AR   10
 +#define CP0C0_MT   7
 +#define CP0C0_VI   3
 +#define CP0C0_K0   0
 +
 +/* Config1 register bits */
 +#define CP0C1_M    31
 +#define CP0C1_MMU  25
 +#define CP0C1_IS   22
 +#define CP0C1_IL   19
 +#define CP0C1_IA   16
 +#define CP0C1_DS   13
 +#define CP0C1_DL   10
 +#define CP0C1_DA   7
 +#define CP0C1_C2   6
 +#define CP0C1_MD   5
 +#define CP0C1_PC   4
 +#define CP0C1_WR   3
 +#define CP0C1_CA   2
 +#define CP0C1_EP   1
 +#define CP0C1_FP   0
 +
 +/* Config2 Register bits */
 +#define CP0C2_M    31
 +#define CP0C2_TU   28
 +#define CP0C2_TS   24
 +#define CP0C2_TL   20
 +#define CP0C2_TA   16
 +#define CP0C2_SU   12
 +#define CP0C2_SS   8
 +#define CP0C2_SL   4
 +#define CP0C2_SA   0
 +
 +/* Config3 Register bits */
 +#define CP0C3_M    31
 +#define CP0C3_ISA_ON_EXC 16
 +#define CP0C3_ULRI  13
 +#define CP0C3_DSPP 10
 +#define CP0C3_LPA  7
 +#define CP0C3_VEIC 6
 +#define CP0C3_VInt 5
 +#define CP0C3_SP   4
 +#define CP0C3_MT   2
 +#define CP0C3_SM   1
 +#define CP0C3_TL   0
 +
 +/* Have config1, Cacheable, noncoherent, write-back, write allocate */
 +#define MIPS_CONFIG0                                              \
 +  ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
 +
 +/* Have config2, no coprocessor2 attached, no MDMX support attached,
 +   no performance counters, no watch registers,
 +   no code compression, EJTAG present, no FPU */
 +#define MIPS_CONFIG1                                              \
 +((1 << CP0C1_M) |                                                 \
 + (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) |            \
 + (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) |            \
 + (0 << CP0C1_FP))
 +
 +/* Have config3, no tertiary/secondary caches implemented */
 +#define MIPS_CONFIG2                                              \
 +((1 << CP0C2_M))
 +
 +/* No config4, no DSP ASE, no large physaddr (PABITS),
 +   no external interrupt controller, no vectored interrupts,
 +   no 1kb pages, no SmartMIPS ASE, no trace logic */
 +#define MIPS_CONFIG3                                              \
 +((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) |          \
 + (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) |        \
 + (0 << CP0C3_SM) | (0 << CP0C3_TL))
 +
 +/* MMU types, the first four entries have the same layout as the
 +   CP0C0_MT field.  */
 +enum mips_mmu_types {
 +      MMU_TYPE_NONE,
 +      MMU_TYPE_R4000,
 +      MMU_TYPE_RESERVED,
 +      MMU_TYPE_FMT,
 +      MMU_TYPE_R3000,
 +      MMU_TYPE_R6000,
 +      MMU_TYPE_R8000
 +};
 +
 +/*
 + * Trap codes
 + */
 +#define T_INT           0     /* Interrupt pending */
 +#define T_TLB_MOD       1     /* TLB modified fault */
 +#define T_TLB_LD_MISS       2 /* TLB miss on load or ifetch */
 +#define T_TLB_ST_MISS       3 /* TLB miss on a store */
 +#define T_ADDR_ERR_LD       4 /* Address error on a load or ifetch */
 +#define T_ADDR_ERR_ST       5 /* Address error on a store */
 +#define T_BUS_ERR_IFETCH    6 /* Bus error on an ifetch */
 +#define T_BUS_ERR_LD_ST     7 /* Bus error on a load or store */
 +#define T_SYSCALL       8     /* System call */
 +#define T_BREAK         9     /* Breakpoint */
 +#define T_RES_INST      10    /* Reserved instruction exception */
 +#define T_COP_UNUSABLE      11        /* Coprocessor unusable */
 +#define T_OVFLOW        12    /* Arithmetic overflow */
 +
 +/*
 + * Trap definitions added for r4000 port.
 + */
 +#define T_TRAP          13    /* Trap instruction */
 +#define T_VCEI          14    /* Virtual coherency exception */
 +#define T_FPE           15    /* Floating point exception */
 +#define T_WATCH         23    /* Watch address reference */
 +#define T_VCED          31    /* Virtual coherency data */
 +
 +/* Resume Flags */
 +#define RESUME_FLAG_DR          (1<<0)        /* Reload guest nonvolatile state? */
 +#define RESUME_FLAG_HOST        (1<<1)        /* Resume host? */
 +
 +#define RESUME_GUEST            0
 +#define RESUME_GUEST_DR         RESUME_FLAG_DR
 +#define RESUME_HOST             RESUME_FLAG_HOST
 +
 +enum emulation_result {
 +      EMULATE_DONE,           /* no further processing */
 +      EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
 +      EMULATE_FAIL,           /* can't emulate this instruction */
 +      EMULATE_WAIT,           /* WAIT instruction */
 +      EMULATE_PRIV_FAIL,
 +};
 +
 +#define MIPS3_PG_G  0x00000001        /* Global; ignore ASID if in lo0 & lo1 */
 +#define MIPS3_PG_V  0x00000002        /* Valid */
 +#define MIPS3_PG_NV 0x00000000
 +#define MIPS3_PG_D  0x00000004        /* Dirty */
 +
 +#define mips3_paddr_to_tlbpfn(x) \
 +    (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
 +#define mips3_tlbpfn_to_paddr(x) \
 +    ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
 +
 +#define MIPS3_PG_SHIFT      6
 +#define MIPS3_PG_FRAME      0x3fffffc0
 +
 +#define VPN2_MASK           0xffffe000
 +#define TLB_IS_GLOBAL(x)    (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
 +#define TLB_VPN2(x)         ((x).tlb_hi & VPN2_MASK)
++#define TLB_ASID(x)         (ASID_MASK((x).tlb_hi))
 +#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
 +
 +struct kvm_mips_tlb {
 +      long tlb_mask;
 +      long tlb_hi;
 +      long tlb_lo0;
 +      long tlb_lo1;
 +};
 +
 +#define KVM_MIPS_GUEST_TLB_SIZE     64
 +struct kvm_vcpu_arch {
 +      void *host_ebase, *guest_ebase;
 +      unsigned long host_stack;
 +      unsigned long host_gp;
 +
 +      /* Host CP0 registers used when handling exits from guest */
 +      unsigned long host_cp0_badvaddr;
 +      unsigned long host_cp0_cause;
 +      unsigned long host_cp0_epc;
 +      unsigned long host_cp0_entryhi;
 +      uint32_t guest_inst;
 +
 +      /* GPRS */
 +      unsigned long gprs[32];
 +      unsigned long hi;
 +      unsigned long lo;
 +      unsigned long pc;
 +
 +      /* FPU State */
 +      struct mips_fpu_struct fpu;
 +
 +      /* COP0 State */
 +      struct mips_coproc *cop0;
 +
 +      /* Host KSEG0 address of the EI/DI offset */
 +      void *kseg0_commpage;
 +
 +      u32 io_gpr;             /* GPR used as IO source/target */
 +
 +      /* Used to calibrate the virtual count register for the guest */
 +      int32_t host_cp0_count;
 +
 +      /* Bitmask of exceptions that are pending */
 +      unsigned long pending_exceptions;
 +
 +      /* Bitmask of pending exceptions to be cleared */
 +      unsigned long pending_exceptions_clr;
 +
 +      unsigned long pending_load_cause;
 +
 +      /* Save/Restore the entryhi register when we are preempted/scheduled back in */
 +      unsigned long preempt_entryhi;
 +
 +      /* S/W Based TLB for guest */
 +      struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
 +
 +      /* Cached guest kernel/user ASIDs */
 +      uint32_t guest_user_asid[NR_CPUS];
 +      uint32_t guest_kernel_asid[NR_CPUS];
 +      struct mm_struct guest_kernel_mm, guest_user_mm;
 +
 +      struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE];
 +
 +
 +      struct hrtimer comparecount_timer;
 +
 +      int last_sched_cpu;
 +
 +      /* WAIT executed */
 +      int wait;
 +};
 +
 +
 +#define kvm_read_c0_guest_index(cop0)               (cop0->reg[MIPS_CP0_TLB_INDEX][0])
 +#define kvm_write_c0_guest_index(cop0, val)         (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
 +#define kvm_read_c0_guest_entrylo0(cop0)            (cop0->reg[MIPS_CP0_TLB_LO0][0])
 +#define kvm_read_c0_guest_entrylo1(cop0)            (cop0->reg[MIPS_CP0_TLB_LO1][0])
 +#define kvm_read_c0_guest_context(cop0)             (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
 +#define kvm_write_c0_guest_context(cop0, val)       (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
 +#define kvm_read_c0_guest_userlocal(cop0)           (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
 +#define kvm_read_c0_guest_pagemask(cop0)            (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
 +#define kvm_write_c0_guest_pagemask(cop0, val)      (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
 +#define kvm_read_c0_guest_wired(cop0)               (cop0->reg[MIPS_CP0_TLB_WIRED][0])
 +#define kvm_write_c0_guest_wired(cop0, val)         (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
 +#define kvm_read_c0_guest_badvaddr(cop0)            (cop0->reg[MIPS_CP0_BAD_VADDR][0])
 +#define kvm_write_c0_guest_badvaddr(cop0, val)      (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
 +#define kvm_read_c0_guest_count(cop0)               (cop0->reg[MIPS_CP0_COUNT][0])
 +#define kvm_write_c0_guest_count(cop0, val)         (cop0->reg[MIPS_CP0_COUNT][0] = (val))
 +#define kvm_read_c0_guest_entryhi(cop0)             (cop0->reg[MIPS_CP0_TLB_HI][0])
 +#define kvm_write_c0_guest_entryhi(cop0, val)       (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
 +#define kvm_read_c0_guest_compare(cop0)             (cop0->reg[MIPS_CP0_COMPARE][0])
 +#define kvm_write_c0_guest_compare(cop0, val)       (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
 +#define kvm_read_c0_guest_status(cop0)              (cop0->reg[MIPS_CP0_STATUS][0])
 +#define kvm_write_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] = (val))
 +#define kvm_read_c0_guest_intctl(cop0)              (cop0->reg[MIPS_CP0_STATUS][1])
 +#define kvm_write_c0_guest_intctl(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][1] = (val))
 +#define kvm_read_c0_guest_cause(cop0)               (cop0->reg[MIPS_CP0_CAUSE][0])
 +#define kvm_write_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
 +#define kvm_read_c0_guest_epc(cop0)                 (cop0->reg[MIPS_CP0_EXC_PC][0])
 +#define kvm_write_c0_guest_epc(cop0, val)           (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
 +#define kvm_read_c0_guest_prid(cop0)                (cop0->reg[MIPS_CP0_PRID][0])
 +#define kvm_write_c0_guest_prid(cop0, val)          (cop0->reg[MIPS_CP0_PRID][0] = (val))
 +#define kvm_read_c0_guest_ebase(cop0)               (cop0->reg[MIPS_CP0_PRID][1])
 +#define kvm_write_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] = (val))
 +#define kvm_read_c0_guest_config(cop0)              (cop0->reg[MIPS_CP0_CONFIG][0])
 +#define kvm_read_c0_guest_config1(cop0)             (cop0->reg[MIPS_CP0_CONFIG][1])
 +#define kvm_read_c0_guest_config2(cop0)             (cop0->reg[MIPS_CP0_CONFIG][2])
 +#define kvm_read_c0_guest_config3(cop0)             (cop0->reg[MIPS_CP0_CONFIG][3])
 +#define kvm_read_c0_guest_config7(cop0)             (cop0->reg[MIPS_CP0_CONFIG][7])
 +#define kvm_write_c0_guest_config(cop0, val)        (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
 +#define kvm_write_c0_guest_config1(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
 +#define kvm_write_c0_guest_config2(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
 +#define kvm_write_c0_guest_config3(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
 +#define kvm_write_c0_guest_config7(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
 +#define kvm_read_c0_guest_errorepc(cop0)            (cop0->reg[MIPS_CP0_ERROR_PC][0])
 +#define kvm_write_c0_guest_errorepc(cop0, val)      (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
 +
 +#define kvm_set_c0_guest_status(cop0, val)          (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
 +#define kvm_clear_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
 +#define kvm_set_c0_guest_cause(cop0, val)           (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
 +#define kvm_clear_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
 +#define kvm_change_c0_guest_cause(cop0, change, val)  \
 +{                                                     \
 +    kvm_clear_c0_guest_cause(cop0, change);           \
 +    kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
 +}
 +#define kvm_set_c0_guest_ebase(cop0, val)           (cop0->reg[MIPS_CP0_PRID][1] |= (val))
 +#define kvm_clear_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
 +#define kvm_change_c0_guest_ebase(cop0, change, val)  \
 +{                                                     \
 +    kvm_clear_c0_guest_ebase(cop0, change);           \
 +    kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
 +}
 +
 +
 +struct kvm_mips_callbacks {
 +      int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
 +      int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
 +      int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
 +      int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
 +      int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
 +      int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
 +      int (*handle_syscall) (struct kvm_vcpu *vcpu);
 +      int (*handle_res_inst) (struct kvm_vcpu *vcpu);
 +      int (*handle_break) (struct kvm_vcpu *vcpu);
 +      int (*vm_init) (struct kvm *kvm);
 +      int (*vcpu_init) (struct kvm_vcpu *vcpu);
 +      int (*vcpu_setup) (struct kvm_vcpu *vcpu);
 +       gpa_t(*gva_to_gpa) (gva_t gva);
 +      void (*queue_timer_int) (struct kvm_vcpu *vcpu);
 +      void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
 +      void (*queue_io_int) (struct kvm_vcpu *vcpu,
 +                            struct kvm_mips_interrupt *irq);
 +      void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
 +                              struct kvm_mips_interrupt *irq);
 +      int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
 +                          uint32_t cause);
 +      int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
 +                        uint32_t cause);
 +      int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
 +                                  struct kvm_regs *regs);
 +      int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
 +                                  struct kvm_regs *regs);
 +};
 +extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 +int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
 +
 +/* Debug: dump vcpu state */
 +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 +
 +/* Trampoline ASM routine to start running in "Guest" context */
 +extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 +
 +/* TLB handling */
 +uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
 +
 +uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
 +
 +uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
 +
 +extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
 +                                         struct kvm_vcpu *vcpu);
 +
 +extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 +                                            struct kvm_vcpu *vcpu);
 +
 +extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 +                                              struct kvm_mips_tlb *tlb,
 +                                              unsigned long *hpa0,
 +                                              unsigned long *hpa1);
 +
 +extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
 +                                                   uint32_t *opc,
 +                                                   struct kvm_run *run,
 +                                                   struct kvm_vcpu *vcpu);
 +
 +extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
 +                                                  uint32_t *opc,
 +                                                  struct kvm_run *run,
 +                                                  struct kvm_vcpu *vcpu);
 +
 +extern void kvm_mips_dump_host_tlbs(void);
 +extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
 +extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);
 +extern void kvm_mips_flush_host_tlb(int skip_kseg0);
 +extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
 +extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
 +
 +extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
 +                                   unsigned long entryhi);
 +extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
 +extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
 +                                                 unsigned long gva);
 +extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 +                                  struct kvm_vcpu *vcpu);
 +extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu);
 +extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);
 +extern void kvm_local_flush_tlb_all(void);
 +extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu);
 +extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
 +extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 +extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
 +
 +/* Emulation */
 +uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
 +enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
 +
 +extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
 +                                                 uint32_t *opc,
 +                                                 struct kvm_run *run,
 +                                                 struct kvm_vcpu *vcpu);
 +
 +extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
 +                                                    uint32_t *opc,
 +                                                    struct kvm_run *run,
 +                                                    struct kvm_vcpu *vcpu);
 +
 +extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
 +                                                       uint32_t *opc,
 +                                                       struct kvm_run *run,
 +                                                       struct kvm_vcpu *vcpu);
 +
 +extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
 +                                                      uint32_t *opc,
 +                                                      struct kvm_run *run,
 +                                                      struct kvm_vcpu *vcpu);
 +
 +extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
 +                                                       uint32_t *opc,
 +                                                       struct kvm_run *run,
 +                                                       struct kvm_vcpu *vcpu);
 +
 +extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
 +                                                      uint32_t *opc,
 +                                                      struct kvm_run *run,
 +                                                      struct kvm_vcpu *vcpu);
 +
 +extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
 +                                                   uint32_t *opc,
 +                                                   struct kvm_run *run,
 +                                                   struct kvm_vcpu *vcpu);
 +
 +extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
 +                                                    uint32_t *opc,
 +                                                    struct kvm_run *run,
 +                                                    struct kvm_vcpu *vcpu);
 +
 +extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
 +                                              uint32_t *opc,
 +                                              struct kvm_run *run,
 +                                              struct kvm_vcpu *vcpu);
 +
 +extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
 +                                                   uint32_t *opc,
 +                                                   struct kvm_run *run,
 +                                                   struct kvm_vcpu *vcpu);
 +
 +extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
 +                                                   uint32_t *opc,
 +                                                   struct kvm_run *run,
 +                                                   struct kvm_vcpu *vcpu);
 +
 +extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 +                                                       struct kvm_run *run);
 +
 +enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
 +
 +enum emulation_result kvm_mips_check_privilege(unsigned long cause,
 +                                             uint32_t *opc,
 +                                             struct kvm_run *run,
 +                                             struct kvm_vcpu *vcpu);
 +
 +enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
 +                                           uint32_t *opc,
 +                                           uint32_t cause,
 +                                           struct kvm_run *run,
 +                                           struct kvm_vcpu *vcpu);
 +enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
 +                                         uint32_t *opc,
 +                                         uint32_t cause,
 +                                         struct kvm_run *run,
 +                                         struct kvm_vcpu *vcpu);
 +enum emulation_result kvm_mips_emulate_store(uint32_t inst,
 +                                           uint32_t cause,
 +                                           struct kvm_run *run,
 +                                           struct kvm_vcpu *vcpu);
 +enum emulation_result kvm_mips_emulate_load(uint32_t inst,
 +                                          uint32_t cause,
 +                                          struct kvm_run *run,
 +                                          struct kvm_vcpu *vcpu);
 +
 +/* Dynamic binary translation */
 +extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
 +                                    struct kvm_vcpu *vcpu);
 +extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
 +                                 struct kvm_vcpu *vcpu);
 +extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
 +                             struct kvm_vcpu *vcpu);
 +extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
 +                             struct kvm_vcpu *vcpu);
 +
 +/* Misc */
 +extern void mips32_SyncICache(unsigned long addr, unsigned long size);
 +extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
 +extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
 +
 +
 +#endif /* __MIPS_KVM_HOST_H__ */
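
As a hedged illustration of how the guest CP0 accessors above combine with the new function-like ASID_MASK(), something along these lines could be written (kvm_get_current_guest_asid() is a hypothetical helper name, not part of this header):

	/* Hypothetical helper: EntryHi carries the active guest ASID in its low
	 * bits, so masking the guest EntryHi register yields the current ASID. */
	static inline uint32_t kvm_get_current_guest_asid(struct kvm_vcpu *vcpu)
	{
		struct mips_coproc *cop0 = vcpu->arch.cop0;

		return ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
	}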
index 820116067c101070c6a4c35727d1e4cfb24563ee,bab1980bbf0d462eac021c4815cd61e9f204832b..1554721e4808e7ffc67d61bc87646cbdd5a325be
@@@ -111,17 -129,12 +134,17 @@@ static inline void enter_lazy_tlb(struc
  static inline void
  get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
  {
 +      extern void kvm_local_flush_tlb_all(void);
        unsigned long asid = asid_cache(cpu);
  
-       if (! ((asid += ASID_INC) & ASID_MASK) ) {
+       if (!ASID_MASK((asid = ASID_INC(asid)))) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
 +#ifdef CONFIG_VIRTUALIZATION
 +              kvm_local_flush_tlb_all();      /* start new asid cycle */
 +#else
                local_flush_tlb_all();  /* start new asid cycle */
 +#endif
                if (!asid)              /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }
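
The rollover branch above is where behaviour changes: once the incremented ASID wraps, the TLB (and a virtually tagged icache, if present) must be flushed, and under CONFIG_VIRTUALIZATION the KVM-aware kvm_local_flush_tlb_all() is used so shadow guest mappings are dropped along with the host's. A standalone sketch of that wrap check, with SK_-prefixed illustrative constants standing in for the real per-CPU definitions:

	#define SK_ASID_MASK(a)        ((a) & 0xffUL)
	#define SK_ASID_INC(a)         ((a) + 1)
	#define SK_ASID_FIRST_VERSION  0x100UL

	static unsigned long sk_next_asid(unsigned long asid)
	{
		asid = SK_ASID_INC(asid);
		if (!SK_ASID_MASK(asid)) {
			/* Low bits wrapped: this is the point at which
			 * get_new_mmu_context() flushes the icache/TLB. */
			if (!asid)			/* counter overflowed entirely */
				asid = SK_ASID_FIRST_VERSION;
		}
		return asid;
	}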
Simple merge
Simple merge
Simple merge
Simple merge
index 7a99e60dadbd6da60dd6efdf1138ac8e9d94ff74,571a69c57d82ad891f3cab323207aa316616d348..3c906e723fd4243f103b9963cd7a8ef13bf2367f
@@@ -1390,10 -1473,26 +1473,27 @@@ unsigned long vi_handlers[64]
  void __init *set_except_vector(int n, void *addr)
  {
        unsigned long handler = (unsigned long) addr;
-       unsigned long old_handler = xchg(&exception_handlers[n], handler);
 -      unsigned long old_handler = exception_handlers[n];
++      unsigned long old_handler;
+ #ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * Only the TLB handlers are cache aligned with an even
+        * address. All other handlers are on an odd address and
+        * require no modification. Otherwise, MIPS32 mode will
+        * be entered when handling any TLB exceptions. That
+        * would be bad...since we must stay in microMIPS mode.
+        */
+       if (!(handler & 0x1))
+               handler |= 1;
+ #endif
 -      exception_handlers[n] = handler;
++      old_handler = xchg(&exception_handlers[n], handler);
 +
        if (n == 0 && cpu_has_divec) {
+ #ifdef CONFIG_CPU_MICROMIPS
+               unsigned long jump_mask = ~((1 << 27) - 1);
+ #else
                unsigned long jump_mask = ~((1 << 28) - 1);
+ #endif
                u32 *buf = (u32 *)(ebase + 0x200);
                unsigned int k0 = 26;
                if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
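
The set_except_vector() hunk above does two things: it installs the handler with xchg() so the old pointer is read and the new one written atomically, and under CONFIG_CPU_MICROMIPS it forces bit 0 of cache-aligned (even) handler addresses so that exception entry stays in microMIPS mode. A hedged sketch of that address fix-up; mmips_handler_address() is a hypothetical name used only for illustration:

	/* On microMIPS, bit 0 of a jump target selects the ISA mode (1 = microMIPS).
	 * TLB handlers are cache aligned, hence even, and must be patched; all other
	 * handlers already carry an odd address and need no change. */
	static inline unsigned long mmips_handler_address(unsigned long handler)
	{
	#ifdef CONFIG_CPU_MICROMIPS
		if (!(handler & 0x1))
			handler |= 1;
	#endif
		return handler;
	}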
index 4b6274b47f3368b289b378703e8e9a17de5f9275,0000000000000000000000000000000000000000..2b2bac9a40aa00a762a0efae5310fddd0521fb5c
mode 100644,000000..100644
--- /dev/null
@@@ -1,1829 -1,0 +1,1826 @@@
-                               uint32_t nasid =
-                                   vcpu->arch.gprs[rt] & ASID_MASK;
 +/*
 +* This file is subject to the terms and conditions of the GNU General Public
 +* License.  See the file "COPYING" in the main directory of this archive
 +* for more details.
 +*
 +* KVM/MIPS: Instruction/Exception emulation
 +*
 +* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 +* Authors: Sanjay Lal <sanjayl@kymasys.com>
 +*/
 +
 +#include <linux/errno.h>
 +#include <linux/err.h>
 +#include <linux/kvm_host.h>
 +#include <linux/module.h>
 +#include <linux/vmalloc.h>
 +#include <linux/fs.h>
 +#include <linux/bootmem.h>
 +#include <linux/random.h>
 +#include <asm/page.h>
 +#include <asm/cacheflush.h>
 +#include <asm/cpu-info.h>
 +#include <asm/mmu_context.h>
 +#include <asm/tlbflush.h>
 +#include <asm/inst.h>
 +
 +#undef CONFIG_MIPS_MT
 +#include <asm/r4kcache.h>
 +#define CONFIG_MIPS_MT
 +
 +#include "kvm_mips_opcode.h"
 +#include "kvm_mips_int.h"
 +#include "kvm_mips_comm.h"
 +
 +#include "trace.h"
 +
 +/*
 + * Compute the return address and emulate the branch, if required.
 + * This function should only be called when a branch delay slot is active.
 + */
 +unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 +      unsigned long instpc)
 +{
 +      unsigned int dspcontrol;
 +      union mips_instruction insn;
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      long epc = instpc;
 +      long nextpc = KVM_INVALID_INST;
 +
 +      if (epc & 3)
 +              goto unaligned;
 +
 +      /*
 +       * Read the instruction
 +       */
 +      insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
 +
 +      if (insn.word == KVM_INVALID_INST)
 +              return KVM_INVALID_INST;
 +
 +      switch (insn.i_format.opcode) {
 +              /*
 +               * jr and jalr are in r_format format.
 +               */
 +      case spec_op:
 +              switch (insn.r_format.func) {
 +              case jalr_op:
 +                      arch->gprs[insn.r_format.rd] = epc + 8;
 +                      /* Fall through */
 +              case jr_op:
 +                      nextpc = arch->gprs[insn.r_format.rs];
 +                      break;
 +              }
 +              break;
 +
 +              /*
 +               * This group contains:
 +               * bltz_op, bgez_op, bltzl_op, bgezl_op,
 +               * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
 +               */
 +      case bcond_op:
 +              switch (insn.i_format.rt) {
 +              case bltz_op:
 +              case bltzl_op:
 +                      if ((long)arch->gprs[insn.i_format.rs] < 0)
 +                              epc = epc + 4 + (insn.i_format.simmediate << 2);
 +                      else
 +                              epc += 8;
 +                      nextpc = epc;
 +                      break;
 +
 +              case bgez_op:
 +              case bgezl_op:
 +                      if ((long)arch->gprs[insn.i_format.rs] >= 0)
 +                              epc = epc + 4 + (insn.i_format.simmediate << 2);
 +                      else
 +                              epc += 8;
 +                      nextpc = epc;
 +                      break;
 +
 +              case bltzal_op:
 +              case bltzall_op:
 +                      arch->gprs[31] = epc + 8;
 +                      if ((long)arch->gprs[insn.i_format.rs] < 0)
 +                              epc = epc + 4 + (insn.i_format.simmediate << 2);
 +                      else
 +                              epc += 8;
 +                      nextpc = epc;
 +                      break;
 +
 +              case bgezal_op:
 +              case bgezall_op:
 +                      arch->gprs[31] = epc + 8;
 +                      if ((long)arch->gprs[insn.i_format.rs] >= 0)
 +                              epc = epc + 4 + (insn.i_format.simmediate << 2);
 +                      else
 +                              epc += 8;
 +                      nextpc = epc;
 +                      break;
 +              case bposge32_op:
 +                      if (!cpu_has_dsp)
 +                              goto sigill;
 +
 +                      dspcontrol = rddsp(0x01);
 +
 +                      if (dspcontrol >= 32) {
 +                              epc = epc + 4 + (insn.i_format.simmediate << 2);
 +                      } else
 +                              epc += 8;
 +                      nextpc = epc;
 +                      break;
 +              }
 +              break;
 +
 +              /*
 +               * These are unconditional and in j_format.
 +               */
 +      case jal_op:
 +              arch->gprs[31] = instpc + 8;
 +      case j_op:
 +              epc += 4;
 +              epc >>= 28;
 +              epc <<= 28;
 +              epc |= (insn.j_format.target << 2);
 +              nextpc = epc;
 +              break;
 +
 +              /*
 +               * These are conditional and in i_format.
 +               */
 +      case beq_op:
 +      case beql_op:
 +              if (arch->gprs[insn.i_format.rs] ==
 +                  arch->gprs[insn.i_format.rt])
 +                      epc = epc + 4 + (insn.i_format.simmediate << 2);
 +              else
 +                      epc += 8;
 +              nextpc = epc;
 +              break;
 +
 +      case bne_op:
 +      case bnel_op:
 +              if (arch->gprs[insn.i_format.rs] !=
 +                  arch->gprs[insn.i_format.rt])
 +                      epc = epc + 4 + (insn.i_format.simmediate << 2);
 +              else
 +                      epc += 8;
 +              nextpc = epc;
 +              break;
 +
 +      case blez_op:           /* not really i_format */
 +      case blezl_op:
 +              /* rt field assumed to be zero */
 +              if ((long)arch->gprs[insn.i_format.rs] <= 0)
 +                      epc = epc + 4 + (insn.i_format.simmediate << 2);
 +              else
 +                      epc += 8;
 +              nextpc = epc;
 +              break;
 +
 +      case bgtz_op:
 +      case bgtzl_op:
 +              /* rt field assumed to be zero */
 +              if ((long)arch->gprs[insn.i_format.rs] > 0)
 +                      epc = epc + 4 + (insn.i_format.simmediate << 2);
 +              else
 +                      epc += 8;
 +              nextpc = epc;
 +              break;
 +
 +              /*
 +               * And now the FPA/cp1 branch instructions.
 +               */
 +      case cop1_op:
 +              printk("%s: unsupported cop1_op\n", __func__);
 +              break;
 +      }
 +
 +      return nextpc;
 +
 +unaligned:
 +      printk("%s: unaligned epc\n", __func__);
 +      return nextpc;
 +
 +sigill:
 +      printk("%s: DSP branch but not DSP ASE\n", __func__);
 +      return nextpc;
 +}
 +
 +enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
 +{
 +      unsigned long branch_pc;
 +      enum emulation_result er = EMULATE_DONE;
 +
 +      if (cause & CAUSEF_BD) {
 +              branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
 +              if (branch_pc == KVM_INVALID_INST) {
 +                      er = EMULATE_FAIL;
 +              } else {
 +                      vcpu->arch.pc = branch_pc;
 +                      kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
 +              }
 +      } else
 +              vcpu->arch.pc += 4;
 +
 +      kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
 +
 +      return er;
 +}
 +
 +/* Every time the compare register is written to, we need to decide when to fire
 + * the timer that represents timer ticks to the GUEST.
 + *
 + */
 +enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      enum emulation_result er = EMULATE_DONE;
 +
 +      /* If COUNT is enabled */
 +      if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
 +              hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
 +              hrtimer_start(&vcpu->arch.comparecount_timer,
 +                            ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
 +      } else {
 +              hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
 +      }
 +
 +      return er;
 +}
 +
 +enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      enum emulation_result er = EMULATE_DONE;
 +
 +      if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
 +              kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
 +                        kvm_read_c0_guest_epc(cop0));
 +              kvm_clear_c0_guest_status(cop0, ST0_EXL);
 +              vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
 +
 +      } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
 +              kvm_clear_c0_guest_status(cop0, ST0_ERL);
 +              vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
 +      } else {
 +              printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
 +                     vcpu->arch.pc);
 +              er = EMULATE_FAIL;
 +      }
 +
 +      return er;
 +}
 +
 +enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 +{
 +      enum emulation_result er = EMULATE_DONE;
 +
 +      kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
 +                vcpu->arch.pending_exceptions);
 +
 +      ++vcpu->stat.wait_exits;
 +      trace_kvm_exit(vcpu, WAIT_EXITS);
 +      if (!vcpu->arch.pending_exceptions) {
 +              vcpu->arch.wait = 1;
 +              kvm_vcpu_block(vcpu);
 +
 +              /* If we are runnable, then go off to user space to check if any
 +               * I/O interrupts are pending.
 +               */
 +              if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
 +                      clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
 +                      vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 +              }
 +      }
 +
 +      return er;
 +}
 +
 +/* XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so that we can catch
 + * this if things ever change
 + */
 +enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      enum emulation_result er = EMULATE_FAIL;
 +      uint32_t pc = vcpu->arch.pc;
 +
 +      printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
 +      return er;
 +}
 +
 +/* Write Guest TLB Entry @ Index */
 +enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      int index = kvm_read_c0_guest_index(cop0);
 +      enum emulation_result er = EMULATE_DONE;
 +      struct kvm_mips_tlb *tlb = NULL;
 +      uint32_t pc = vcpu->arch.pc;
 +
 +      if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
 +              printk("%s: illegal index: %d\n", __func__, index);
 +              printk
 +                  ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
 +                   pc, index, kvm_read_c0_guest_entryhi(cop0),
 +                   kvm_read_c0_guest_entrylo0(cop0),
 +                   kvm_read_c0_guest_entrylo1(cop0),
 +                   kvm_read_c0_guest_pagemask(cop0));
 +              index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
 +      }
 +
 +      tlb = &vcpu->arch.guest_tlb[index];
 +#if 1
 +      /* Probe the shadow host TLB for the entry being overwritten; if one matches, invalidate it */
 +      kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
 +#endif
 +
 +      tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 +      tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
 +      tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
 +      tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
 +
 +      kvm_debug
 +          ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
 +           pc, index, kvm_read_c0_guest_entryhi(cop0),
 +           kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
 +           kvm_read_c0_guest_pagemask(cop0));
 +
 +      return er;
 +}
 +
 +/* Write Guest TLB Entry @ Random Index */
 +enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      enum emulation_result er = EMULATE_DONE;
 +      struct kvm_mips_tlb *tlb = NULL;
 +      uint32_t pc = vcpu->arch.pc;
 +      int index;
 +
 +#if 1
 +      get_random_bytes(&index, sizeof(index));
 +      index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
 +#else
 +      index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
 +#endif
 +
 +      if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
 +              printk("%s: illegal index: %d\n", __func__, index);
 +              return EMULATE_FAIL;
 +      }
 +
 +      tlb = &vcpu->arch.guest_tlb[index];
 +
 +#if 1
 +      /* Probe the shadow host TLB for the entry being overwritten; if one matches, invalidate it */
 +      kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
 +#endif
 +
 +      tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 +      tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
 +      tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
 +      tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
 +
 +      kvm_debug
 +          ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
 +           pc, index, kvm_read_c0_guest_entryhi(cop0),
 +           kvm_read_c0_guest_entrylo0(cop0),
 +           kvm_read_c0_guest_entrylo1(cop0));
 +
 +      return er;
 +}
 +
 +enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      long entryhi = kvm_read_c0_guest_entryhi(cop0);
 +      enum emulation_result er = EMULATE_DONE;
 +      uint32_t pc = vcpu->arch.pc;
 +      int index = -1;
 +
 +      index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
 +
 +      kvm_write_c0_guest_index(cop0, index);
 +
 +      kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
 +                index);
 +
 +      return er;
 +}
 +
 +enum emulation_result
 +kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 +                   struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      enum emulation_result er = EMULATE_DONE;
 +      int32_t rt, rd, copz, sel, co_bit, op;
 +      uint32_t pc = vcpu->arch.pc;
 +      unsigned long curr_pc;
 +
 +      /*
 +       * Update PC and hold onto current PC in case there is
 +       * an error and we want to rollback the PC
 +       */
 +      curr_pc = vcpu->arch.pc;
 +      er = update_pc(vcpu, cause);
 +      if (er == EMULATE_FAIL) {
 +              return er;
 +      }
 +
 +      copz = (inst >> 21) & 0x1f;
 +      rt = (inst >> 16) & 0x1f;
 +      rd = (inst >> 11) & 0x1f;
 +      sel = inst & 0x7;
 +      co_bit = (inst >> 25) & 1;
 +
 +      /* Verify that the register is valid */
 +      if (rd > MIPS_CP0_DESAVE) {
 +              printk("Invalid rd: %d\n", rd);
 +              er = EMULATE_FAIL;
 +              goto done;
 +      }
 +
 +      if (co_bit) {
 +              op = (inst) & 0xff;
 +
 +              switch (op) {
 +              case tlbr_op:   /*  Read indexed TLB entry  */
 +                      er = kvm_mips_emul_tlbr(vcpu);
 +                      break;
 +              case tlbwi_op:  /*  Write indexed  */
 +                      er = kvm_mips_emul_tlbwi(vcpu);
 +                      break;
 +              case tlbwr_op:  /*  Write random  */
 +                      er = kvm_mips_emul_tlbwr(vcpu);
 +                      break;
 +              case tlbp_op:   /* TLB Probe */
 +                      er = kvm_mips_emul_tlbp(vcpu);
 +                      break;
 +              case rfe_op:
 +                      printk("!!!COP0_RFE!!!\n");
 +                      break;
 +              case eret_op:
 +                      er = kvm_mips_emul_eret(vcpu);
 +                      goto dont_update_pc;
 +                      break;
 +              case wait_op:
 +                      er = kvm_mips_emul_wait(vcpu);
 +                      break;
 +              }
 +      } else {
 +              switch (copz) {
 +              case mfc_op:
 +#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
 +                      cop0->stat[rd][sel]++;
 +#endif
 +                      /* Get reg */
 +                      if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
 +                              /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
 +                              vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
 +                      } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
 +                              vcpu->arch.gprs[rt] = 0x0;
 +#ifdef CONFIG_KVM_MIPS_DYN_TRANS
 +                              kvm_mips_trans_mfc0(inst, opc, vcpu);
 +#endif
 +                      }
 +                      else {
 +                              vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
 +
 +#ifdef CONFIG_KVM_MIPS_DYN_TRANS
 +                              kvm_mips_trans_mfc0(inst, opc, vcpu);
 +#endif
 +                      }
 +
 +                      kvm_debug
 +                          ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
 +                           pc, rd, sel, rt, vcpu->arch.gprs[rt]);
 +
 +                      break;
 +
 +              case dmfc_op:
 +                      vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
 +                      break;
 +
 +              case mtc_op:
 +#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
 +                      cop0->stat[rd][sel]++;
 +#endif
 +                      if ((rd == MIPS_CP0_TLB_INDEX)
 +                          && (vcpu->arch.gprs[rt] >=
 +                              KVM_MIPS_GUEST_TLB_SIZE)) {
 +                              printk("Invalid TLB Index: %ld",
 +                                     vcpu->arch.gprs[rt]);
 +                              er = EMULATE_FAIL;
 +                              break;
 +                      }
 +#define C0_EBASE_CORE_MASK 0xff
 +                      if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
 +                              /* Preserve CORE number */
 +                              kvm_change_c0_guest_ebase(cop0,
 +                                                        ~(C0_EBASE_CORE_MASK),
 +                                                        vcpu->arch.gprs[rt]);
 +                              printk("MTCz, cop0->reg[EBASE]: %#lx\n",
 +                                     kvm_read_c0_guest_ebase(cop0));
 +                      } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
-                                   ((kvm_read_c0_guest_entryhi(cop0) &
-                                     ASID_MASK) != nasid)) {
++                              uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
 +                              if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
 +                                  &&
-                                            kvm_read_c0_guest_entryhi(cop0) &
-                                            ASID_MASK,
-                                            vcpu->arch.gprs[rt] & ASID_MASK);
++                                  (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
++                                    != nasid)) {
 +
 +                                      kvm_debug
 +                                          ("MTCz, change ASID from %#lx to %#lx\n",
-                                                 (kvm_read_c0_guest_entryhi
-                                                  (cop0) & ASID_MASK));
++                                           ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
++                                           ASID_MASK(vcpu->arch.gprs[rt]));
 +
 +                                      /* Blow away the shadow host TLBs */
 +                                      kvm_mips_flush_host_tlb(1);
 +                              }
 +                              kvm_write_c0_guest_entryhi(cop0,
 +                                                         vcpu->arch.gprs[rt]);
 +                      }
 +                      /* Are we writing to COUNT */
 +                      else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
 +                              /* Linux doesn't seem to write into COUNT, we throw an error
 +                               * if we notice a write to COUNT
 +                               */
 +                              /*er = EMULATE_FAIL; */
 +                              goto done;
 +                      } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
 +                              kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
 +                                        pc, kvm_read_c0_guest_compare(cop0),
 +                                        vcpu->arch.gprs[rt]);
 +
 +                              /* If we are writing to COMPARE */
 +                              /* Clear pending timer interrupt, if any */
 +                              kvm_mips_callbacks->dequeue_timer_int(vcpu);
 +                              kvm_write_c0_guest_compare(cop0,
 +                                                         vcpu->arch.gprs[rt]);
 +                      } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
 +                              kvm_write_c0_guest_status(cop0,
 +                                                        vcpu->arch.gprs[rt]);
 +                              /* Make sure that CU1 and NMI bits are never set */
 +                              kvm_clear_c0_guest_status(cop0,
 +                                                        (ST0_CU1 | ST0_NMI));
 +
 +#ifdef CONFIG_KVM_MIPS_DYN_TRANS
 +                              kvm_mips_trans_mtc0(inst, opc, vcpu);
 +#endif
 +                      } else {
 +                              cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
 +#ifdef CONFIG_KVM_MIPS_DYN_TRANS
 +                              kvm_mips_trans_mtc0(inst, opc, vcpu);
 +#endif
 +                      }
 +
 +                      kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
 +                                rd, sel, cop0->reg[rd][sel]);
 +                      break;
 +
 +              case dmtc_op:
 +                      printk
 +                          ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
 +                           vcpu->arch.pc, rt, rd, sel);
 +                      er = EMULATE_FAIL;
 +                      break;
 +
 +              case mfmcz_op:
 +#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
 +                      cop0->stat[MIPS_CP0_STATUS][0]++;
 +#endif
 +                      if (rt != 0) {
 +                              vcpu->arch.gprs[rt] =
 +                                  kvm_read_c0_guest_status(cop0);
 +                      }
 +                      /* EI */
 +                      if (inst & 0x20) {
 +                              kvm_debug("[%#lx] mfmcz_op: EI\n",
 +                                        vcpu->arch.pc);
 +                              kvm_set_c0_guest_status(cop0, ST0_IE);
 +                      } else {
 +                              kvm_debug("[%#lx] mfmcz_op: DI\n",
 +                                        vcpu->arch.pc);
 +                              kvm_clear_c0_guest_status(cop0, ST0_IE);
 +                      }
 +
 +                      break;
 +
 +              case wrpgpr_op:
 +                      {
 +                              uint32_t css =
 +                                  cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
 +                              uint32_t pss =
 +                                  (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
 +                              /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
 +                              if (css || pss) {
 +                                      er = EMULATE_FAIL;
 +                                      break;
 +                              }
 +                              kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
 +                                        vcpu->arch.gprs[rt]);
 +                              vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
 +                      }
 +                      break;
 +              default:
 +                      printk
 +                          ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
 +                           vcpu->arch.pc, copz);
 +                      er = EMULATE_FAIL;
 +                      break;
 +              }
 +      }
 +
 +done:
 +      /*
 +       * Rollback PC only if emulation was unsuccessful
 +       */
 +      if (er == EMULATE_FAIL) {
 +              vcpu->arch.pc = curr_pc;
 +      }
 +
 +dont_update_pc:
 +      /*
 +       * This is for special instructions whose emulation
 +       * updates the PC, so do not overwrite the PC under
 +       * any circumstances
 +       */
 +
 +      return er;
 +}
 +
 +enum emulation_result
 +kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
 +                     struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      enum emulation_result er = EMULATE_DO_MMIO;
 +      int32_t op, base, rt, offset;
 +      uint32_t bytes;
 +      void *data = run->mmio.data;
 +      unsigned long curr_pc;
 +
 +      /*
 +       * Update PC and hold onto current PC in case there is
 +       * an error and we want to rollback the PC
 +       */
 +      curr_pc = vcpu->arch.pc;
 +      er = update_pc(vcpu, cause);
 +      if (er == EMULATE_FAIL)
 +              return er;
 +
 +      rt = (inst >> 16) & 0x1f;
 +      base = (inst >> 21) & 0x1f;
 +      offset = inst & 0xffff;
 +      op = (inst >> 26) & 0x3f;
 +
 +      switch (op) {
 +      case sb_op:
 +              bytes = 1;
 +              if (bytes > sizeof(run->mmio.data)) {
 +                      kvm_err("%s: bad MMIO length: %d\n", __func__,
 +                             run->mmio.len);
 +              }
 +              run->mmio.phys_addr =
 +                  kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
 +                                                 host_cp0_badvaddr);
 +              if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
 +                      er = EMULATE_FAIL;
 +                      break;
 +              }
 +              run->mmio.len = bytes;
 +              run->mmio.is_write = 1;
 +              vcpu->mmio_needed = 1;
 +              vcpu->mmio_is_write = 1;
 +              *(u8 *) data = vcpu->arch.gprs[rt];
 +              kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
 +                        vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
 +                        *(uint8_t *) data);
 +
 +              break;
 +
 +      case sw_op:
 +              bytes = 4;
 +              if (bytes > sizeof(run->mmio.data)) {
 +                      kvm_err("%s: bad MMIO length: %d\n", __func__,
 +                             run->mmio.len);
 +              }
 +              run->mmio.phys_addr =
 +                  kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
 +                                                 host_cp0_badvaddr);
 +              if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
 +                      er = EMULATE_FAIL;
 +                      break;
 +              }
 +
 +              run->mmio.len = bytes;
 +              run->mmio.is_write = 1;
 +              vcpu->mmio_needed = 1;
 +              vcpu->mmio_is_write = 1;
 +              *(uint32_t *) data = vcpu->arch.gprs[rt];
 +
 +              kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
 +                        vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
 +                        vcpu->arch.gprs[rt], *(uint32_t *) data);
 +              break;
 +
 +      case sh_op:
 +              bytes = 2;
 +              if (bytes > sizeof(run->mmio.data)) {
 +                      kvm_err("%s: bad MMIO length: %d\n", __func__,
 +                             run->mmio.len);
 +              }
 +              run->mmio.phys_addr =
 +                  kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
 +                                                 host_cp0_badvaddr);
 +              if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
 +                      er = EMULATE_FAIL;
 +                      break;
 +              }
 +
 +              run->mmio.len = bytes;
 +              run->mmio.is_write = 1;
 +              vcpu->mmio_needed = 1;
 +              vcpu->mmio_is_write = 1;
 +              *(uint16_t *) data = vcpu->arch.gprs[rt];
 +
 +              kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
 +                        vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
 +                        vcpu->arch.gprs[rt], *(uint16_t *) data);
 +              break;
 +
 +      default:
 +              printk("Store not yet supported\n");
 +              er = EMULATE_FAIL;
 +              break;
 +      }
 +
 +      /*
 +       * Rollback PC if emulation was unsuccessful
 +       */
 +      if (er == EMULATE_FAIL) {
 +              vcpu->arch.pc = curr_pc;
 +      }
 +
 +      return er;
 +}
 +
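The store and load emulators here decode the same I-type fields from the faulting instruction. A stand-alone sketch of that decode follows; the struct and function names are assumed for illustration and are not part of this patch. Note that the emulators never recompute base + offset themselves, since the effective address is taken from CP0 BadVAddr instead.

	struct mips_itype_sketch {
		unsigned int op;	/* bits 31..26: major opcode (sb_op, sw_op, ...) */
		unsigned int base;	/* bits 25..21: base register number */
		unsigned int rt;	/* bits 20..16: source/target register number */
		int offset;		/* bits 15..0: signed 16-bit displacement */
	};

	static inline struct mips_itype_sketch decode_itype_sketch(uint32_t inst)
	{
		struct mips_itype_sketch f;

		f.op = (inst >> 26) & 0x3f;
		f.base = (inst >> 21) & 0x1f;
		f.rt = (inst >> 16) & 0x1f;
		f.offset = (int16_t)(inst & 0xffff);	/* sign-extend the displacement */
		return f;
	}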
 +enum emulation_result
 +kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
 +                    struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      enum emulation_result er = EMULATE_DO_MMIO;
 +      int32_t op, base, rt, offset;
 +      uint32_t bytes;
 +
 +      rt = (inst >> 16) & 0x1f;
 +      base = (inst >> 21) & 0x1f;
 +      offset = inst & 0xffff;
 +      op = (inst >> 26) & 0x3f;
 +
 +      vcpu->arch.pending_load_cause = cause;
 +      vcpu->arch.io_gpr = rt;
 +
 +      switch (op) {
 +      case lw_op:
 +              bytes = 4;
 +              if (bytes > sizeof(run->mmio.data)) {
 +                      kvm_err("%s: bad MMIO length: %d\n", __func__,
 +                             run->mmio.len);
 +                      er = EMULATE_FAIL;
 +                      break;
 +              }
 +              run->mmio.phys_addr =
 +                  kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
 +                                                 host_cp0_badvaddr);
 +              if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
 +                      er = EMULATE_FAIL;
 +                      break;
 +              }
 +
 +              run->mmio.len = bytes;
 +              run->mmio.is_write = 0;
 +              vcpu->mmio_needed = 1;
 +              vcpu->mmio_is_write = 0;
 +              break;
 +
 +      case lh_op:
 +      case lhu_op:
 +              bytes = 2;
 +              if (bytes > sizeof(run->mmio.data)) {
 +                      kvm_err("%s: bad MMIO length: %d\n", __func__,
 +                             run->mmio.len);
 +                      er = EMULATE_FAIL;
 +                      break;
 +              }
 +              run->mmio.phys_addr =
 +                  kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
 +                                                 host_cp0_badvaddr);
 +              if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
 +                      er = EMULATE_FAIL;
 +                      break;
 +              }
 +
 +              run->mmio.len = bytes;
 +              run->mmio.is_write = 0;
 +              vcpu->mmio_needed = 1;
 +              vcpu->mmio_is_write = 0;
 +
 +              if (op == lh_op)
 +                      vcpu->mmio_needed = 2;
 +              else
 +                      vcpu->mmio_needed = 1;
 +
 +              break;
 +
 +      case lbu_op:
 +      case lb_op:
 +              bytes = 1;
 +              if (bytes > sizeof(run->mmio.data)) {
 +                      kvm_err("%s: bad MMIO length: %d\n", __func__,
 +                             run->mmio.len);
 +                      er = EMULATE_FAIL;
 +                      break;
 +              }
 +              run->mmio.phys_addr =
 +                  kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
 +                                                 host_cp0_badvaddr);
 +              if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
 +                      er = EMULATE_FAIL;
 +                      break;
 +              }
 +
 +              run->mmio.len = bytes;
 +              run->mmio.is_write = 0;
 +              vcpu->mmio_is_write = 0;
 +
 +              if (op == lb_op)
 +                      vcpu->mmio_needed = 2;
 +              else
 +                      vcpu->mmio_needed = 1;
 +
 +              break;
 +
 +      default:
 +              printk("Load not yet supported\n");
 +              er = EMULATE_FAIL;
 +              break;
 +      }
 +
 +      return er;
 +}
 +
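kvm_mips_emulate_load() encodes signedness in vcpu->mmio_needed: 2 for the signed lb/lh forms, 1 for lbu/lhu; kvm_mips_complete_mmio_load() later selects the matching extension. A minimal restatement of that convention, as an illustrative helper whose name is assumed and which is not part of this file:

	static unsigned long mmio_extend_sketch(const void *data, unsigned int len,
						int mmio_needed)
	{
		switch (len) {
		case 1:		/* lb (mmio_needed == 2) vs. lbu */
			return (mmio_needed == 2) ? (unsigned long)*(const int8_t *)data
						  : (unsigned long)*(const uint8_t *)data;
		case 2:		/* lh (mmio_needed == 2) vs. lhu */
			return (mmio_needed == 2) ? (unsigned long)*(const int16_t *)data
						  : (unsigned long)*(const uint16_t *)data;
		default:	/* lw */
			return (unsigned long)*(const int32_t *)data;
		}
	}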
 +int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
 +{
 +      unsigned long offset = (va & ~PAGE_MASK);
 +      struct kvm *kvm = vcpu->kvm;
 +      unsigned long pa;
 +      gfn_t gfn;
 +      pfn_t pfn;
 +
 +      gfn = va >> PAGE_SHIFT;
 +
 +      if (gfn >= kvm->arch.guest_pmap_npages) {
 +              printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
 +              kvm_mips_dump_host_tlbs();
 +              kvm_arch_vcpu_dump_regs(vcpu);
 +              return -1;
 +      }
 +      pfn = kvm->arch.guest_pmap[gfn];
 +      pa = (pfn << PAGE_SHIFT) | offset;
 +
 +      printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
 +
 +      mips32_SyncICache(CKSEG0ADDR(pa), 32);
 +      return 0;
 +}
 +
 +#define MIPS_CACHE_OP_INDEX_INV         0x0
 +#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
 +#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
 +#define MIPS_CACHE_OP_IMP               0x3
 +#define MIPS_CACHE_OP_HIT_INV           0x4
 +#define MIPS_CACHE_OP_FILL_WB_INV       0x5
 +#define MIPS_CACHE_OP_HIT_HB            0x6
 +#define MIPS_CACHE_OP_FETCH_LOCK        0x7
 +
 +#define MIPS_CACHE_ICACHE               0x0
 +#define MIPS_CACHE_DCACHE               0x1
 +#define MIPS_CACHE_SEC                  0x3
 +
 +enum emulation_result
 +kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
 +                     struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      extern void (*r4k_blast_dcache) (void);
 +      extern void (*r4k_blast_icache) (void);
 +      enum emulation_result er = EMULATE_DONE;
 +      int32_t offset, cache, op_inst, op, base;
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      unsigned long va;
 +      unsigned long curr_pc;
 +
 +      /*
 +       * Update PC and hold onto current PC in case there is
 +       * an error and we want to rollback the PC
 +       */
 +      curr_pc = vcpu->arch.pc;
 +      er = update_pc(vcpu, cause);
 +      if (er == EMULATE_FAIL)
 +              return er;
 +
 +      base = (inst >> 21) & 0x1f;
 +      op_inst = (inst >> 16) & 0x1f;
 +      offset = inst & 0xffff;
 +      cache = (inst >> 16) & 0x3;
 +      op = (inst >> 18) & 0x7;
 +
 +      va = arch->gprs[base] + offset;
 +
 +      kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
 +                cache, op, base, arch->gprs[base], offset);
 +
 +      /* Treat INDEX_INV as a no-op; Linux issues it at startup to invalidate
 +       * the caches entirely by stepping through all the ways/indexes.
 +       */
 +      if (op == MIPS_CACHE_OP_INDEX_INV) {
 +              kvm_debug
 +                  ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
 +                   vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
 +                   arch->gprs[base], offset);
 +
 +              if (cache == MIPS_CACHE_DCACHE)
 +                      r4k_blast_dcache();
 +              else if (cache == MIPS_CACHE_ICACHE)
 +                      r4k_blast_icache();
 +              else {
 +                      printk("%s: unsupported CACHE INDEX operation\n",
 +                             __func__);
 +                      return EMULATE_FAIL;
 +              }
 +
 +#ifdef CONFIG_KVM_MIPS_DYN_TRANS
 +              kvm_mips_trans_cache_index(inst, opc, vcpu);
 +#endif
 +              goto done;
 +      }
 +
 +      preempt_disable();
 +      if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
 +
 +              if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
 +                      kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
 +              }
 +      } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
 +                 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
 +              int index;
 +
 +              /* If an entry already exists then skip */
 +              if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
 +                      goto skip_fault;
 +              }
 +
 +              /* If the address is not in the guest TLB, give the guest a fault;
 +               * the resulting handler will do the right thing.
 +               */
 +              index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
-                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
++                                                ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
 +
 +              if (index < 0) {
 +                      vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
 +                      vcpu->arch.host_cp0_badvaddr = va;
 +                      er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
 +                                                       vcpu);
 +                      preempt_enable();
 +                      goto dont_update_pc;
 +              } else {
 +                      struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
 +                      /* Check if the entry is valid; if not, set up a TLB invalid exception for the guest */
 +                      if (!TLB_IS_VALID(*tlb, va)) {
 +                              er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
 +                                                              run, vcpu);
 +                              preempt_enable();
 +                              goto dont_update_pc;
 +                      } else {
 +                              /* We fault an entry from the guest tlb to the shadow host TLB */
 +                              kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
 +                                                                   NULL,
 +                                                                   NULL);
 +                      }
 +              }
 +      } else {
 +              printk
 +                  ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
 +                   cache, op, base, arch->gprs[base], offset);
 +              er = EMULATE_FAIL;
 +              preempt_enable();
 +              goto dont_update_pc;
 +
 +      }
 +
 +skip_fault:
 +      /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
 +      if (cache == MIPS_CACHE_DCACHE
 +          && (op == MIPS_CACHE_OP_FILL_WB_INV
 +              || op == MIPS_CACHE_OP_HIT_INV)) {
 +              flush_dcache_line(va);
 +
 +#ifdef CONFIG_KVM_MIPS_DYN_TRANS
 +              /* Replace the CACHE instruction with a SYNCI; not identical, but it avoids a trap */
 +              kvm_mips_trans_cache_va(inst, opc, vcpu);
 +#endif
 +      } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
 +              flush_dcache_line(va);
 +              flush_icache_line(va);
 +
 +#ifdef CONFIG_KVM_MIPS_DYN_TRANS
 +              /* Replace the CACHE instruction with a SYNCI */
 +              kvm_mips_trans_cache_va(inst, opc, vcpu);
 +#endif
 +      } else {
 +              printk
 +                  ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
 +                   cache, op, base, arch->gprs[base], offset);
 +              er = EMULATE_FAIL;
 +              preempt_enable();
 +              goto dont_update_pc;
 +      }
 +
 +      preempt_enable();
 +
 +dont_update_pc:
 +      /*
 +       * Rollback PC
 +       */
 +      vcpu->arch.pc = curr_pc;
 +done:
 +      return er;
 +}
 +
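For reference, the cache and op values tested above come from the 5-bit op field of the CACHE instruction (bits 20..16): the low two bits select the cache, the upper three the operation. An illustrative decode, with an assumed function name, consistent with the MIPS_CACHE_* constants defined above:

	static inline void decode_cache_inst_sketch(uint32_t inst, int *cache, int *op)
	{
		*cache = (inst >> 16) & 0x3;	/* 0 = I-cache, 1 = D-cache, 3 = secondary */
		*op = (inst >> 18) & 0x7;	/* e.g. 0 = Index Invalidate, 5 = Hit WB Invalidate */
	}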
 +enum emulation_result
 +kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
 +                    struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      enum emulation_result er = EMULATE_DONE;
 +      uint32_t inst;
 +
 +      /*
 +       *  Fetch the instruction.
 +       */
 +      if (cause & CAUSEF_BD) {
 +              opc += 1;
 +      }
 +
 +      inst = kvm_get_inst(opc, vcpu);
 +
 +      switch (((union mips_instruction)inst).r_format.opcode) {
 +      case cop0_op:
 +              er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
 +              break;
 +      case sb_op:
 +      case sh_op:
 +      case sw_op:
 +              er = kvm_mips_emulate_store(inst, cause, run, vcpu);
 +              break;
 +      case lb_op:
 +      case lbu_op:
 +      case lhu_op:
 +      case lh_op:
 +      case lw_op:
 +              er = kvm_mips_emulate_load(inst, cause, run, vcpu);
 +              break;
 +
 +      case cache_op:
 +              ++vcpu->stat.cache_exits;
 +              trace_kvm_exit(vcpu, CACHE_EXITS);
 +              er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
 +              break;
 +
 +      default:
 +              printk("Instruction emulation not supported (%p/%#x)\n", opc,
 +                     inst);
 +              kvm_arch_vcpu_dump_regs(vcpu);
 +              er = EMULATE_FAIL;
 +              break;
 +      }
 +
 +      return er;
 +}
 +
 +enum emulation_result
 +kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
 +                       struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      enum emulation_result er = EMULATE_DONE;
 +
 +      if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 +              /* save old pc */
 +              kvm_write_c0_guest_epc(cop0, arch->pc);
 +              kvm_set_c0_guest_status(cop0, ST0_EXL);
 +
 +              if (cause & CAUSEF_BD)
 +                      kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
 +              else
 +                      kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
 +
 +              kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
 +
 +              kvm_change_c0_guest_cause(cop0, (0xff),
 +                                        (T_SYSCALL << CAUSEB_EXCCODE));
 +
 +              /* Set PC to the exception entry point */
 +              arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +
 +      } else {
 +              printk("Trying to deliver SYSCALL when EXL is already set\n");
 +              er = EMULATE_FAIL;
 +      }
 +
 +      return er;
 +}
 +
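The EXL-clear path above is the delivery sequence every kvm_mips_emulate_*() exception routine below repeats: save EPC, set Status.EXL, mirror Cause.BD, set the ExcCode and branch to the guest vector. Distilled into one illustrative helper; the name and signature are assumed and this is not part of the patch:

	static void deliver_guest_exception_sketch(struct kvm_vcpu_arch *arch,
						   struct mips_coproc *cop0,
						   unsigned long cause,
						   unsigned int exccode,
						   unsigned long vector_offset)
	{
		kvm_write_c0_guest_epc(cop0, arch->pc);		/* save old pc */
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff), (exccode << CAUSEB_EXCCODE));

		/* 0x0 for the TLB refill vector, 0x180 for the general vector */
		arch->pc = KVM_GUEST_KSEG0 + vector_offset;
	}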
 +enum emulation_result
 +kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
 +                          struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      enum emulation_result er = EMULATE_DONE;
 +      unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
++                              ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
 +
 +      if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 +              /* save old pc */
 +              kvm_write_c0_guest_epc(cop0, arch->pc);
 +              kvm_set_c0_guest_status(cop0, ST0_EXL);
 +
 +              if (cause & CAUSEF_BD)
 +                      kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
 +              else
 +                      kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
 +
 +              kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
 +                        arch->pc);
 +
 +              /* set pc to the exception entry point */
 +              arch->pc = KVM_GUEST_KSEG0 + 0x0;
 +
 +      } else {
 +              kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
 +                        arch->pc);
 +
 +              arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +      }
 +
 +      kvm_change_c0_guest_cause(cop0, (0xff),
 +                                (T_TLB_LD_MISS << CAUSEB_EXCCODE));
 +
 +      /* setup badvaddr, context and entryhi registers for the guest */
 +      kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
 +      /* XXXKYMA: is the context register used by linux??? */
 +      kvm_write_c0_guest_entryhi(cop0, entryhi);
 +      /* Blow away the shadow host TLBs */
 +      kvm_mips_flush_host_tlb(1);
 +
 +      return er;
 +}
 +
 +enum emulation_result
 +kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
 +                         struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      enum emulation_result er = EMULATE_DONE;
 +      unsigned long entryhi =
 +              (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
++              ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
 +
 +      if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 +              /* save old pc */
 +              kvm_write_c0_guest_epc(cop0, arch->pc);
 +              kvm_set_c0_guest_status(cop0, ST0_EXL);
 +
 +              if (cause & CAUSEF_BD)
 +                      kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
 +              else
 +                      kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
 +
 +              kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
 +                        arch->pc);
 +
 +              /* set pc to the exception entry point */
 +              arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +
 +      } else {
 +              kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
 +                        arch->pc);
 +              arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +      }
 +
 +      kvm_change_c0_guest_cause(cop0, (0xff),
 +                                (T_TLB_LD_MISS << CAUSEB_EXCCODE));
 +
 +      /* setup badvaddr, context and entryhi registers for the guest */
 +      kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
 +      /* XXXKYMA: is the context register used by linux??? */
 +      kvm_write_c0_guest_entryhi(cop0, entryhi);
 +      /* Blow away the shadow host TLBs */
 +      kvm_mips_flush_host_tlb(1);
 +
 +      return er;
 +}
 +
 +enum emulation_result
 +kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
 +                          struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      enum emulation_result er = EMULATE_DONE;
 +      unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
++                              ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
 +
 +      if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 +              /* save old pc */
 +              kvm_write_c0_guest_epc(cop0, arch->pc);
 +              kvm_set_c0_guest_status(cop0, ST0_EXL);
 +
 +              if (cause & CAUSEF_BD)
 +                      kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
 +              else
 +                      kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
 +
 +              kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
 +                        arch->pc);
 +
 +              /* Set PC to the exception entry point */
 +              arch->pc = KVM_GUEST_KSEG0 + 0x0;
 +      } else {
 +              kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
 +                        arch->pc);
 +              arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +      }
 +
 +      kvm_change_c0_guest_cause(cop0, (0xff),
 +                                (T_TLB_ST_MISS << CAUSEB_EXCCODE));
 +
 +      /* setup badvaddr, context and entryhi registers for the guest */
 +      kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
 +      /* XXXKYMA: is the context register used by linux??? */
 +      kvm_write_c0_guest_entryhi(cop0, entryhi);
 +      /* Blow away the shadow host TLBs */
 +      kvm_mips_flush_host_tlb(1);
 +
 +      return er;
 +}
 +
 +enum emulation_result
 +kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
 +                         struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      enum emulation_result er = EMULATE_DONE;
 +      unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
++              ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
 +
 +      if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 +              /* save old pc */
 +              kvm_write_c0_guest_epc(cop0, arch->pc);
 +              kvm_set_c0_guest_status(cop0, ST0_EXL);
 +
 +              if (cause & CAUSEF_BD)
 +                      kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
 +              else
 +                      kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
 +
 +              kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
 +                        arch->pc);
 +
 +              /* Set PC to the exception entry point */
 +              arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +      } else {
 +              kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
 +                        arch->pc);
 +              arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +      }
 +
 +      kvm_change_c0_guest_cause(cop0, (0xff),
 +                                (T_TLB_ST_MISS << CAUSEB_EXCCODE));
 +
 +      /* setup badvaddr, context and entryhi registers for the guest */
 +      kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
 +      /* XXXKYMA: is the context register used by linux??? */
 +      kvm_write_c0_guest_entryhi(cop0, entryhi);
 +      /* Blow away the shadow host TLBs */
 +      kvm_mips_flush_host_tlb(1);
 +
 +      return er;
 +}
 +
 +/* TLBMOD: store into address matching TLB with Dirty bit off */
 +enum emulation_result
 +kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
 +                     struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      enum emulation_result er = EMULATE_DONE;
 +
 +#ifdef DEBUG
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
 +                              ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
 +      int index;
 +
 +      /*
 +       * If the address is not in the guest TLB, then we are in trouble
 +       */
 +      index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
 +      if (index < 0) {
 +              /* XXXKYMA Invalidate and retry */
 +              kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
 +              kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
 +                   __func__, entryhi);
 +              kvm_mips_dump_guest_tlbs(vcpu);
 +              kvm_mips_dump_host_tlbs();
 +              return EMULATE_FAIL;
 +      }
 +#endif
 +
 +      er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
 +      return er;
 +}
 +
 +enum emulation_result
 +kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
 +                      struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                                         (kvm_read_c0_guest_entryhi
-                                          (vcpu->arch.cop0) & ASID_MASK));
++                              ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      enum emulation_result er = EMULATE_DONE;
 +
 +      if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 +              /* save old pc */
 +              kvm_write_c0_guest_epc(cop0, arch->pc);
 +              kvm_set_c0_guest_status(cop0, ST0_EXL);
 +
 +              if (cause & CAUSEF_BD)
 +                      kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
 +              else
 +                      kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
 +
 +              kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
 +                        arch->pc);
 +
 +              arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +      } else {
 +              kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
 +                        arch->pc);
 +              arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +      }
 +
 +      kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
 +
 +      /* setup badvaddr, context and entryhi registers for the guest */
 +      kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
 +      /* XXXKYMA: is the context register used by linux??? */
 +      kvm_write_c0_guest_entryhi(cop0, entryhi);
 +      /* Blow away the shadow host TLBs */
 +      kvm_mips_flush_host_tlb(1);
 +
 +      return er;
 +}
 +
 +enum emulation_result
 +kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
 +                       struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      enum emulation_result er = EMULATE_DONE;
 +
 +      if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 +              /* save old pc */
 +              kvm_write_c0_guest_epc(cop0, arch->pc);
 +              kvm_set_c0_guest_status(cop0, ST0_EXL);
 +
 +              if (cause & CAUSEF_BD)
 +                      kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
 +              else
 +                      kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
 +
 +      }
 +
 +      arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +
 +      kvm_change_c0_guest_cause(cop0, (0xff),
 +                                (T_COP_UNUSABLE << CAUSEB_EXCCODE));
 +      kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
 +
 +      return er;
 +}
 +
 +enum emulation_result
 +kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
 +                      struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      enum emulation_result er = EMULATE_DONE;
 +
 +      if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 +              /* save old pc */
 +              kvm_write_c0_guest_epc(cop0, arch->pc);
 +              kvm_set_c0_guest_status(cop0, ST0_EXL);
 +
 +              if (cause & CAUSEF_BD)
 +                      kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
 +              else
 +                      kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
 +
 +              kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
 +
 +              kvm_change_c0_guest_cause(cop0, (0xff),
 +                                        (T_RES_INST << CAUSEB_EXCCODE));
 +
 +              /* Set PC to the exception entry point */
 +              arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +
 +      } else {
 +              kvm_err("Trying to deliver RI when EXL is already set\n");
 +              er = EMULATE_FAIL;
 +      }
 +
 +      return er;
 +}
 +
 +enum emulation_result
 +kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
 +                      struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      enum emulation_result er = EMULATE_DONE;
 +
 +      if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 +              /* save old pc */
 +              kvm_write_c0_guest_epc(cop0, arch->pc);
 +              kvm_set_c0_guest_status(cop0, ST0_EXL);
 +
 +              if (cause & CAUSEF_BD)
 +                      kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
 +              else
 +                      kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
 +
 +              kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
 +
 +              kvm_change_c0_guest_cause(cop0, (0xff),
 +                                        (T_BREAK << CAUSEB_EXCCODE));
 +
 +              /* Set PC to the exception entry point */
 +              arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +
 +      } else {
 +              printk("Trying to deliver BP when EXL is already set\n");
 +              er = EMULATE_FAIL;
 +      }
 +
 +      return er;
 +}
 +
 +/*
 + * ll/sc, rdhwr, sync emulation
 + */
 +
 +#define OPCODE 0xfc000000
 +#define BASE   0x03e00000
 +#define RT     0x001f0000
 +#define OFFSET 0x0000ffff
 +#define LL     0xc0000000
 +#define SC     0xe0000000
 +#define SPEC0  0x00000000
 +#define SPEC3  0x7c000000
 +#define RD     0x0000f800
 +#define FUNC   0x0000003f
 +#define SYNC   0x0000000f
 +#define RDHWR  0x0000003b
 +
 +enum emulation_result
 +kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
 +                 struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      enum emulation_result er = EMULATE_DONE;
 +      unsigned long curr_pc;
 +      uint32_t inst;
 +
 +      /*
 +       * Update PC and hold onto current PC in case there is
 +       * an error and we want to rollback the PC
 +       */
 +      curr_pc = vcpu->arch.pc;
 +      er = update_pc(vcpu, cause);
 +      if (er == EMULATE_FAIL)
 +              return er;
 +
 +      /*
 +       *  Fetch the instruction.
 +       */
 +      if (cause & CAUSEF_BD)
 +              opc += 1;
 +
 +      inst = kvm_get_inst(opc, vcpu);
 +
 +      if (inst == KVM_INVALID_INST) {
 +              printk("%s: Cannot get inst @ %p\n", __func__, opc);
 +              return EMULATE_FAIL;
 +      }
 +
 +      if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
 +              int rd = (inst & RD) >> 11;
 +              int rt = (inst & RT) >> 16;
 +              switch (rd) {
 +              case 0: /* CPU number */
 +                      arch->gprs[rt] = 0;
 +                      break;
 +              case 1: /* SYNCI length */
 +                      arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
 +                                           current_cpu_data.icache.linesz);
 +                      break;
 +              case 2: /* Read count register */
 +                      printk("RDHWR: Count register\n");
 +                      arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
 +                      break;
 +              case 3: /* Count register resolution */
 +                      switch (current_cpu_data.cputype) {
 +                      case CPU_20KC:
 +                      case CPU_25KF:
 +                              arch->gprs[rt] = 1;
 +                              break;
 +                      default:
 +                              arch->gprs[rt] = 2;
 +                      }
 +                      break;
 +              case 29:
 +#if 1
 +                      arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
 +#else
 +                      /* UserLocal not implemented */
 +                      er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
 +#endif
 +                      break;
 +
 +              default:
 +                      printk("RDHWR not supported\n");
 +                      er = EMULATE_FAIL;
 +                      break;
 +              }
 +      } else {
 +              printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
 +              er = EMULATE_FAIL;
 +      }
 +
 +      /*
 +       * Rollback PC only if emulation was unsuccessful
 +       */
 +      if (er == EMULATE_FAIL) {
 +              vcpu->arch.pc = curr_pc;
 +      }
 +      return er;
 +}
 +
 +enum emulation_result
 +kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
 +{
 +      unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
 +      enum emulation_result er = EMULATE_DONE;
 +      unsigned long curr_pc;
 +
 +      if (run->mmio.len > sizeof(*gpr)) {
 +              printk("Bad MMIO length: %d\n", run->mmio.len);
 +              er = EMULATE_FAIL;
 +              goto done;
 +      }
 +
 +      /*
 +       * Update PC and hold onto current PC in case there is
 +       * an error and we want to rollback the PC
 +       */
 +      curr_pc = vcpu->arch.pc;
 +      er = update_pc(vcpu, vcpu->arch.pending_load_cause);
 +      if (er == EMULATE_FAIL)
 +              return er;
 +
 +      switch (run->mmio.len) {
 +      case 4:
 +              *gpr = *(int32_t *) run->mmio.data;
 +              break;
 +
 +      case 2:
 +              if (vcpu->mmio_needed == 2)
 +                      *gpr = *(int16_t *) run->mmio.data;
 +              else
 +                      *gpr = *(uint16_t *) run->mmio.data;
 +
 +              break;
 +      case 1:
 +              if (vcpu->mmio_needed == 2)
 +                      *gpr = *(int8_t *) run->mmio.data;
 +              else
 +                      *gpr = *(u8 *) run->mmio.data;
 +              break;
 +      }
 +
 +      if (vcpu->arch.pending_load_cause & CAUSEF_BD)
 +              kvm_debug
 +                  ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
 +                   vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
 +                   vcpu->mmio_needed);
 +
 +done:
 +      return er;
 +}
 +
 +static enum emulation_result
 +kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
 +                   struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      struct kvm_vcpu_arch *arch = &vcpu->arch;
 +      enum emulation_result er = EMULATE_DONE;
 +
 +      if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 +              /* save old pc */
 +              kvm_write_c0_guest_epc(cop0, arch->pc);
 +              kvm_set_c0_guest_status(cop0, ST0_EXL);
 +
 +              if (cause & CAUSEF_BD)
 +                      kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
 +              else
 +                      kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
 +
 +              kvm_change_c0_guest_cause(cop0, (0xff),
 +                                        (exccode << CAUSEB_EXCCODE));
 +
 +              /* Set PC to the exception entry point */
 +              arch->pc = KVM_GUEST_KSEG0 + 0x180;
 +              kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
 +
 +              kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
 +                        exccode, kvm_read_c0_guest_epc(cop0),
 +                        kvm_read_c0_guest_badvaddr(cop0));
 +      } else {
 +              printk("Trying to deliver EXC when EXL is already set\n");
 +              er = EMULATE_FAIL;
 +      }
 +
 +      return er;
 +}
 +
 +enum emulation_result
 +kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
 +                       struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      enum emulation_result er = EMULATE_DONE;
 +      uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
 +      unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 +
 +      int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
 +
 +      if (usermode) {
 +              switch (exccode) {
 +              case T_INT:
 +              case T_SYSCALL:
 +              case T_BREAK:
 +              case T_RES_INST:
 +                      break;
 +
 +              case T_COP_UNUSABLE:
 +                      if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
 +                              er = EMULATE_PRIV_FAIL;
 +                      break;
 +
 +              case T_TLB_MOD:
 +                      break;
 +
 +              case T_TLB_LD_MISS:
 +                      /* If we are accessing guest kernel space, send an address error exception to the guest */
 +                      if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
 +                              printk("%s: LD MISS @ %#lx\n", __func__,
 +                                     badvaddr);
 +                              cause &= ~0xff;
 +                              cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
 +                              er = EMULATE_PRIV_FAIL;
 +                      }
 +                      break;
 +
 +              case T_TLB_ST_MISS:
 +                      /* If we are accessing guest kernel space, send an address error exception to the guest */
 +                      if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
 +                              printk("%s: ST MISS @ %#lx\n", __func__,
 +                                     badvaddr);
 +                              cause &= ~0xff;
 +                              cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
 +                              er = EMULATE_PRIV_FAIL;
 +                      }
 +                      break;
 +
 +              case T_ADDR_ERR_ST:
 +                      printk("%s: address error ST @ %#lx\n", __func__,
 +                             badvaddr);
 +                      if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
 +                              cause &= ~0xff;
 +                              cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
 +                      }
 +                      er = EMULATE_PRIV_FAIL;
 +                      break;
 +              case T_ADDR_ERR_LD:
 +                      printk("%s: address error LD @ %#lx\n", __func__,
 +                             badvaddr);
 +                      if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
 +                              cause &= ~0xff;
 +                              cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
 +                      }
 +                      er = EMULATE_PRIV_FAIL;
 +                      break;
 +              default:
 +                      er = EMULATE_PRIV_FAIL;
 +                      break;
 +              }
 +      }
 +
 +      if (er == EMULATE_PRIV_FAIL) {
 +              kvm_mips_emulate_exc(cause, opc, run, vcpu);
 +      }
 +      return er;
 +}
 +
 +/* User Address (UA) fault; this could happen if
 + * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 + *     case we pass on the fault to the guest kernel and let it handle it.
 + * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 + *     case we inject the TLB from the Guest TLB into the shadow host TLB
 + */
 +enum emulation_result
 +kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
 +                      struct kvm_run *run, struct kvm_vcpu *vcpu)
 +{
 +      enum emulation_result er = EMULATE_DONE;
 +      uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
 +      unsigned long va = vcpu->arch.host_cp0_badvaddr;
 +      int index;
 +
 +      kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
 +                vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
 +
 +      /* KVM would not have got the exception if this entry was valid in the shadow host TLB.
 +       * Check the guest TLB; if the entry is not there, send the guest an
 +       * exception. The guest exception handler should then inject an entry into the
 +       * guest TLB.
 +       */
 +      index = kvm_mips_guest_tlb_lookup(vcpu,
 +                                        (va & VPN2_MASK) |
++                                        ASID_MASK(kvm_read_c0_guest_entryhi
++                                         (vcpu->arch.cop0)));
 +      if (index < 0) {
 +              if (exccode == T_TLB_LD_MISS) {
 +                      er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
 +              } else if (exccode == T_TLB_ST_MISS) {
 +                      er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
 +              } else {
 +                      printk("%s: invalid exc code: %d\n", __func__, exccode);
 +                      er = EMULATE_FAIL;
 +              }
 +      } else {
 +              struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
 +
 +              /* Check if the entry is valid; if not, set up a TLB invalid exception for the guest */
 +              if (!TLB_IS_VALID(*tlb, va)) {
 +                      if (exccode == T_TLB_LD_MISS) {
 +                              er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
 +                                                              vcpu);
 +                      } else if (exccode == T_TLB_ST_MISS) {
 +                              er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
 +                                                              vcpu);
 +                      } else {
 +                              printk("%s: invalid exc code: %d\n", __func__,
 +                                     exccode);
 +                              er = EMULATE_FAIL;
 +                      }
 +              } else {
 +#ifdef DEBUG
 +                      kvm_debug
 +                          ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
 +                           tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
 +#endif
 +                      /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
 +                      kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
 +                                                           NULL);
 +              }
 +      }
 +
 +      return er;
 +}
index e3f0d9b8b6c59604fdb60e67d3a230493da5a49f,0000000000000000000000000000000000000000..89511a9258d394f2d540e722fdaaa8f5aa6a82f1
mode 100644,000000..100644
--- /dev/null
@@@ -1,932 -1,0 +1,928 @@@
-       return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
 +/*
 +* This file is subject to the terms and conditions of the GNU General Public
 +* License.  See the file "COPYING" in the main directory of this archive
 +* for more details.
 +*
 +* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 +* TLB handlers run from KSEG0
 +*
 +* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 +* Authors: Sanjay Lal <sanjayl@kymasys.com>
 +*/
 +
 +#include <linux/init.h>
 +#include <linux/sched.h>
 +#include <linux/smp.h>
 +#include <linux/mm.h>
 +#include <linux/delay.h>
 +#include <linux/module.h>
 +#include <linux/kvm_host.h>
 +
 +#include <asm/cpu.h>
 +#include <asm/bootinfo.h>
 +#include <asm/mmu_context.h>
 +#include <asm/pgtable.h>
 +#include <asm/cacheflush.h>
 +
 +#undef CONFIG_MIPS_MT
 +#include <asm/r4kcache.h>
 +#define CONFIG_MIPS_MT
 +
 +#define KVM_GUEST_PC_TLB    0
 +#define KVM_GUEST_SP_TLB    1
 +
 +#define PRIx64 "llx"
 +
 +/*
 + * Invalidate TLB entries by giving each a unique EntryHi in unmapped CKSEG0;
 + * each entry maps an even/odd page pair, hence the PAGE_SHIFT + 1 shift.
 + */
 +#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
 +
 +atomic_t kvm_mips_instance;
 +EXPORT_SYMBOL(kvm_mips_instance);
 +
 +/* These function pointers are initialized once the KVM module is loaded */
 +pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
 +EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
 +
 +void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
 +EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
 +
 +bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
 +EXPORT_SYMBOL(kvm_mips_is_error_pfn);
 +
 +uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 +{
-       return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
++      return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
 +}
 +
 +
 +uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 +{
-       printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
++      return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
 +}
 +
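Throughout this merge the constant mask ("asid & ASID_MASK") becomes a function-like macro ("ASID_MASK(asid)"), as in the two helpers above. A stand-in definition is sketched below purely for illustration; it assumes an 8-bit ASID field, whereas the real macro comes from <asm/mmu_context.h> and its width is CPU dependent.

	#define ASID_MASK_SKETCH(x)	((x) & 0xff)	/* assumed 8-bit ASID, illustration only */

	/* e.g. ASID_MASK_SKETCH(read_c0_entryhi()) keeps just the ASID bits of EntryHi */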
 +inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
 +{
 +      return vcpu->kvm->arch.commpage_tlb;
 +}
 +
 +
 +/*
 + * Structure defining a TLB entry data set.
 + */
 +
 +void kvm_mips_dump_host_tlbs(void)
 +{
 +      unsigned long old_entryhi;
 +      unsigned long old_pagemask;
 +      struct kvm_mips_tlb tlb;
 +      unsigned long flags;
 +      int i;
 +
 +      local_irq_save(flags);
 +
 +      old_entryhi = read_c0_entryhi();
 +      old_pagemask = read_c0_pagemask();
 +
 +      printk("HOST TLBs:\n");
-                       (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
++      printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
 +
 +      for (i = 0; i < current_cpu_data.tlbsize; i++) {
 +              write_c0_index(i);
 +              mtc0_tlbw_hazard();
 +
 +              tlb_read();
 +              tlbw_use_hazard();
 +
 +              tlb.tlb_hi = read_c0_entryhi();
 +              tlb.tlb_lo0 = read_c0_entrylo0();
 +              tlb.tlb_lo1 = read_c0_entrylo1();
 +              tlb.tlb_mask = read_c0_pagemask();
 +
 +              printk("TLB%c%3d Hi 0x%08lx ",
 +                     (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
 +                     i, tlb.tlb_hi);
 +              printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
 +                     (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
 +                     (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
 +                     (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
 +                     (tlb.tlb_lo0 >> 3) & 7);
 +              printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
 +                     (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
 +                     (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
 +                     (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
 +                     (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
 +      }
 +      write_c0_entryhi(old_entryhi);
 +      write_c0_pagemask(old_pagemask);
 +      mtc0_tlbw_hazard();
 +      local_irq_restore(flags);
 +}
 +
 +void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      struct kvm_mips_tlb tlb;
 +      int i;
 +
 +      printk("Guest TLBs:\n");
 +      printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
 +
 +      for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 +              tlb = vcpu->arch.guest_tlb[i];
 +              printk("TLB%c%3d Hi 0x%08lx ",
 +                     (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
 +                     i, tlb.tlb_hi);
 +              printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
 +                     (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
 +                     (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
 +                     (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
 +                     (tlb.tlb_lo0 >> 3) & 7);
 +              printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
 +                     (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
 +                     (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
 +                     (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
 +                     (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
 +      }
 +}
 +
 +void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
 +{
 +      int i;
 +      volatile struct kvm_mips_tlb tlb;
 +
 +      printk("Shadow TLBs:\n");
 +      for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 +              tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
 +              printk("TLB%c%3d Hi 0x%08lx ",
 +                     (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
 +                     i, tlb.tlb_hi);
 +              printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
 +                     (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
 +                     (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
 +                     (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
 +                     (tlb.tlb_lo0 >> 3) & 7);
 +              printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
 +                     (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
 +                     (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
 +                     (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
 +                     (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
 +      }
 +}
 +
 +static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 +{
 +      pfn_t pfn;
 +
 +      if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
 +              return;
 +
 +      pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
 +
 +      if (kvm_mips_is_error_pfn(pfn)) {
 +              panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
 +      }
 +
 +      kvm->arch.guest_pmap[gfn] = pfn;
 +      return;
 +}
 +
 +/* Translate guest KSEG0 addresses to Host PA */
 +unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
 +      unsigned long gva)
 +{
 +      gfn_t gfn;
 +      uint32_t offset = gva & ~PAGE_MASK;
 +      struct kvm *kvm = vcpu->kvm;
 +
 +      if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
 +              kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
 +                      __builtin_return_address(0), gva);
 +              return KVM_INVALID_PAGE;
 +      }
 +
 +      gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
 +
 +      if (gfn >= kvm->arch.guest_pmap_npages) {
 +              kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
 +                      gva);
 +              return KVM_INVALID_PAGE;
 +      }
 +      kvm_mips_map_page(vcpu->kvm, gfn);
 +      return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
 +}
 +
 +/* XXXKYMA: Must be called with interrupts disabled */
 +/* set flush_dcache_mask == 0 if no dcache flush required */
 +int
 +kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
 +      unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
 +{
 +      unsigned long flags;
 +      unsigned long old_entryhi;
 +      volatile int idx;
 +
 +      local_irq_save(flags);
 +
 +
 +      old_entryhi = read_c0_entryhi();
 +      write_c0_entryhi(entryhi);
 +      mtc0_tlbw_hazard();
 +
 +      tlb_probe();
 +      tlb_probe_hazard();
 +      idx = read_c0_index();
 +
 +      if (idx > current_cpu_data.tlbsize) {
 +              kvm_err("%s: Invalid Index: %d\n", __func__, idx);
 +              kvm_mips_dump_host_tlbs();
 +              return -1;
 +      }
 +
 +      if (idx < 0) {
 +              idx = read_c0_random() % current_cpu_data.tlbsize;
 +              write_c0_index(idx);
 +              mtc0_tlbw_hazard();
 +      }
 +      write_c0_entrylo0(entrylo0);
 +      write_c0_entrylo1(entrylo1);
 +      mtc0_tlbw_hazard();
 +
 +      tlb_write_indexed();
 +      tlbw_use_hazard();
 +
 +#ifdef DEBUG
 +      if (debug) {
 +              kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
 +                        "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
 +                        vcpu->arch.pc, idx, read_c0_entryhi(),
 +                        read_c0_entrylo0(), read_c0_entrylo1());
 +      }
 +#endif
 +
 +      /* Flush D-cache */
 +      if (flush_dcache_mask) {
 +              if (entrylo0 & MIPS3_PG_V) {
 +                      ++vcpu->stat.flush_dcache_exits;
 +                      flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
 +              }
 +              if (entrylo1 & MIPS3_PG_V) {
 +                      ++vcpu->stat.flush_dcache_exits;
 +                      flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
 +                              (0x1 << PAGE_SHIFT));
 +              }
 +      }
 +
 +      /* Restore old ASID */
 +      write_c0_entryhi(old_entryhi);
 +      mtc0_tlbw_hazard();
 +      tlbw_use_hazard();
 +      local_irq_restore(flags);
 +      return 0;
 +}
 +
 +
 +/* XXXKYMA: Must be called with interrupts disabled */
 +int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 +      struct kvm_vcpu *vcpu)
 +{
 +      gfn_t gfn;
 +      pfn_t pfn0, pfn1;
 +      unsigned long vaddr = 0;
 +      unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
 +      int even;
 +      struct kvm *kvm = vcpu->kvm;
 +      const int flush_dcache_mask = 0;
 +
 +
 +      if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
 +              kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
 +              kvm_mips_dump_host_tlbs();
 +              return -1;
 +      }
 +
 +      gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
 +      if (gfn >= kvm->arch.guest_pmap_npages) {
 +              kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
 +                      gfn, badvaddr);
 +              kvm_mips_dump_host_tlbs();
 +              return -1;
 +      }
 +      even = !(gfn & 0x1);
 +      vaddr = badvaddr & (PAGE_MASK << 1);
 +
 +      kvm_mips_map_page(vcpu->kvm, gfn);
 +      kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
 +
 +      if (even) {
 +              pfn0 = kvm->arch.guest_pmap[gfn];
 +              pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
 +      } else {
 +              pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
 +              pfn1 = kvm->arch.guest_pmap[gfn];
 +      }
 +
 +      entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
 +      entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
 +                      (0x1 << 1);
 +      entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
 +                      (0x1 << 1);
 +
 +      return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
 +                                     flush_dcache_mask);
 +}
 +
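In this file's own terms, the literal EntryLo bits assembled above decode as: (0x3 << 3) sets the cache coherency attribute to 3 (cacheable, noncoherent), (1 << 2) is MIPS3_PG_D (writable/dirty), and (0x1 << 1) is MIPS3_PG_V (valid). A named form of the same value, for illustration only and not part of the patch:

	#define KSEG0_ENTRYLO_ATTRS_SKETCH	((0x3 << 3) | MIPS3_PG_D | MIPS3_PG_V)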
 +int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 +      struct kvm_vcpu *vcpu)
 +{
 +      pfn_t pfn0, pfn1;
 +      unsigned long flags, old_entryhi = 0, vaddr = 0;
 +      unsigned long entrylo0 = 0, entrylo1 = 0;
 +
 +
 +      pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
 +      pfn1 = 0;
 +      entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
 +                      (0x1 << 1);
 +      entrylo1 = 0;
 +
 +      local_irq_save(flags);
 +
 +      old_entryhi = read_c0_entryhi();
 +      vaddr = badvaddr & (PAGE_MASK << 1);
 +      write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
 +      mtc0_tlbw_hazard();
 +      write_c0_entrylo0(entrylo0);
 +      mtc0_tlbw_hazard();
 +      write_c0_entrylo1(entrylo1);
 +      mtc0_tlbw_hazard();
 +      write_c0_index(kvm_mips_get_commpage_asid(vcpu));
 +      mtc0_tlbw_hazard();
 +      tlb_write_indexed();
 +      mtc0_tlbw_hazard();
 +      tlbw_use_hazard();
 +
 +#ifdef DEBUG
 +      kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
 +           vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
 +           read_c0_entrylo0(), read_c0_entrylo1());
 +#endif
 +
 +      /* Restore old ASID */
 +      write_c0_entryhi(old_entryhi);
 +      mtc0_tlbw_hazard();
 +      tlbw_use_hazard();
 +      local_irq_restore(flags);
 +
 +      return 0;
 +}
 +
 +int
 +kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 +      struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
 +{
 +      unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
 +      struct kvm *kvm = vcpu->kvm;
 +      pfn_t pfn0, pfn1;
 +
 +
 +      if ((tlb->tlb_hi & VPN2_MASK) == 0) {
 +              pfn0 = 0;
 +              pfn1 = 0;
 +      } else {
 +              kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
 +              kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
 +
 +              pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
 +              pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
 +      }
 +
 +      if (hpa0)
 +              *hpa0 = pfn0 << PAGE_SHIFT;
 +
 +      if (hpa1)
 +              *hpa1 = pfn1 << PAGE_SHIFT;
 +
 +      /* Get attributes from the Guest TLB */
 +      entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
 +                      kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
 +      entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
 +                      (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
 +      entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
 +                      (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
 +
 +#ifdef DEBUG
 +      kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
 +                tlb->tlb_lo0, tlb->tlb_lo1);
 +#endif
 +
 +      return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
 +                                     tlb->tlb_mask);
 +}
 +
 +int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 +{
 +      int i;
 +      int index = -1;
 +      struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
 +
 +
 +      for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 +              if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
++                      (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
 +                      index = i;
 +                      break;
 +              }
 +      }
 +
 +#ifdef DEBUG
 +      if (index >= 0)
 +              kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
 +                        __func__, entryhi, index, tlb[index].tlb_lo0,
 +                        tlb[index].tlb_lo1);
 +#endif
 +
 +      return index;
 +}
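
The loop above applies the standard TLB match rule: the VPN2 bits that fall outside the entry's page mask must agree, and the entry must be either global or tagged with the probing ASID. A compact sketch of that predicate, with assumed field widths (8-bit ASID, 4 KiB base pages):

    #include <stdbool.h>

    struct sketch_tlb_entry {
            unsigned long hi;       /* VPN2 in the high bits, ASID in the low bits */
            unsigned long mask;     /* page mask: which VPN2 bits to ignore */
            bool global;
    };

    #define SKETCH_VPN2_MASK        0xffffe000UL
    #define SKETCH_ASID_MASK        0x000000ffUL

    static bool sketch_tlb_match(const struct sketch_tlb_entry *e,
                                 unsigned long probe_entryhi)
    {
            unsigned long evpn2 = (e->hi & SKETCH_VPN2_MASK) & ~e->mask;
            unsigned long pvpn2 = (probe_entryhi & SKETCH_VPN2_MASK) & ~e->mask;

            if (evpn2 != pvpn2)
                    return false;
            return e->global ||
                   (e->hi & SKETCH_ASID_MASK) == (probe_entryhi & SKETCH_ASID_MASK);
    }

    int main(void)
    {
            struct sketch_tlb_entry e = { .hi = 0x7f4a2000UL | 0x05, .mask = 0 };

            return !sketch_tlb_match(&e, 0x7f4a2000UL | 0x05);  /* exit 0 on a hit */
    }
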
 +
 +int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
 +{
 +      unsigned long old_entryhi, flags;
 +      volatile int idx;
 +
 +
 +      local_irq_save(flags);
 +
 +      old_entryhi = read_c0_entryhi();
 +
 +      if (KVM_GUEST_KERNEL_MODE(vcpu))
 +              write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
 +      else {
 +              write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
 +      }
 +
 +      mtc0_tlbw_hazard();
 +
 +      tlb_probe();
 +      tlb_probe_hazard();
 +      idx = read_c0_index();
 +
 +      /* Restore old ASID */
 +      write_c0_entryhi(old_entryhi);
 +      mtc0_tlbw_hazard();
 +      tlbw_use_hazard();
 +
 +      local_irq_restore(flags);
 +
 +#ifdef DEBUG
 +      kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
 +#endif
 +
 +      return idx;
 +}
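
After tlb_probe(), the Index register either holds the matching slot number or has its top (probe-failure) bit set, which is why callers of this function treat a negative return as "not present". An illustrative decoder, with the bit positions assumed rather than taken from the architecture headers:

    #include <stdio.h>

    /* Stand-in for reading c0_index after a TLBP. */
    static int sketch_probe_result(unsigned int raw_index)
    {
            if (raw_index & 0x80000000u)    /* probe-failure bit set */
                    return -1;              /* treated like "idx < 0" above */
            return (int)(raw_index & 0x3f); /* matching entry number */
    }

    int main(void)
    {
            printf("hit  -> %d\n", sketch_probe_result(5));
            printf("miss -> %d\n", sketch_probe_result(0x80000000u));
            return 0;
    }
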
 +
 +int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
 +{
 +      int idx;
 +      unsigned long flags, old_entryhi;
 +
 +      local_irq_save(flags);
 +
 +
 +      old_entryhi = read_c0_entryhi();
 +
 +      write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
 +      mtc0_tlbw_hazard();
 +
 +      tlb_probe();
 +      tlb_probe_hazard();
 +      idx = read_c0_index();
 +
 +      if (idx >= current_cpu_data.tlbsize)
 +              BUG();
 +
 +      if (idx >= 0) {
 +              write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 +              mtc0_tlbw_hazard();
 +
 +              write_c0_entrylo0(0);
 +              mtc0_tlbw_hazard();
 +
 +              write_c0_entrylo1(0);
 +              mtc0_tlbw_hazard();
 +
 +              tlb_write_indexed();
 +              mtc0_tlbw_hazard();
 +      }
 +
 +      write_c0_entryhi(old_entryhi);
 +      mtc0_tlbw_hazard();
 +      tlbw_use_hazard();
 +
 +      local_irq_restore(flags);
 +
 +#ifdef DEBUG
 +      if (idx >= 0) {
 +              kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
 +                        (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
 +      }
 +#endif
 +
 +      return 0;
 +}
 +
 +/* XXXKYMA: Fix: Guest USER/KERNEL no longer share the same ASID */
 +int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
 +{
 +      unsigned long flags, old_entryhi;
 +
 +      if (index >= current_cpu_data.tlbsize)
 +              BUG();
 +
 +      local_irq_save(flags);
 +
 +
 +      old_entryhi = read_c0_entryhi();
 +
 +      write_c0_entryhi(UNIQUE_ENTRYHI(index));
 +      mtc0_tlbw_hazard();
 +
 +      write_c0_index(index);
 +      mtc0_tlbw_hazard();
 +
 +      write_c0_entrylo0(0);
 +      mtc0_tlbw_hazard();
 +
 +      write_c0_entrylo1(0);
 +      mtc0_tlbw_hazard();
 +
 +      tlb_write_indexed();
 +      mtc0_tlbw_hazard();
 +      tlbw_use_hazard();
 +
 +      write_c0_entryhi(old_entryhi);
 +      mtc0_tlbw_hazard();
 +      tlbw_use_hazard();
 +
 +      local_irq_restore(flags);
 +
 +      return 0;
 +}
 +
 +void kvm_mips_flush_host_tlb(int skip_kseg0)
 +{
 +      unsigned long flags;
 +      unsigned long old_entryhi, entryhi;
 +      unsigned long old_pagemask;
 +      int entry = 0;
 +      int maxentry = current_cpu_data.tlbsize;
 +
 +
 +      local_irq_save(flags);
 +
 +      old_entryhi = read_c0_entryhi();
 +      old_pagemask = read_c0_pagemask();
 +
 +      /* Blast 'em all away. */
 +      for (entry = 0; entry < maxentry; entry++) {
 +
 +              write_c0_index(entry);
 +              mtc0_tlbw_hazard();
 +
 +              if (skip_kseg0) {
 +                      tlb_read();
 +                      tlbw_use_hazard();
 +
 +                      entryhi = read_c0_entryhi();
 +
 +                      /* Don't blow away guest kernel entries */
 +                      if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
 +                              continue;
 +                      }
 +              }
 +
 +              /* Make sure all entries differ. */
 +              write_c0_entryhi(UNIQUE_ENTRYHI(entry));
 +              mtc0_tlbw_hazard();
 +              write_c0_entrylo0(0);
 +              mtc0_tlbw_hazard();
 +              write_c0_entrylo1(0);
 +              mtc0_tlbw_hazard();
 +
 +              tlb_write_indexed();
 +              mtc0_tlbw_hazard();
 +      }
 +
 +      tlbw_use_hazard();
 +
 +      write_c0_entryhi(old_entryhi);
 +      write_c0_pagemask(old_pagemask);
 +      mtc0_tlbw_hazard();
 +      tlbw_use_hazard();
 +
 +      local_irq_restore(flags);
 +}
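
Note that MIPS has no per-entry "invalid" flag to set here: a slot is retired by rewriting it with zeroed EntryLo words and an EntryHi value that can never match a real access (and that differs per slot, since duplicate EntryHi values can raise a machine check). UNIQUE_ENTRYHI is defined elsewhere in the tree; a plausible shape for it, shown only as an assumption, is an address in unmapped kernel space spaced one double-page per index:

    #include <stdio.h>

    #define SKETCH_CKSEG0           0x80000000UL    /* unmapped, never TLB-translated */
    #define SKETCH_PAGE_SHIFT       12

    /* One distinct, never-matching VPN2 per TLB slot (double-page spacing). */
    #define SKETCH_UNIQUE_ENTRYHI(idx) \
            (SKETCH_CKSEG0 + ((unsigned long)(idx) << (SKETCH_PAGE_SHIFT + 1)))

    int main(void)
    {
            for (int idx = 0; idx < 4; idx++)
                    printf("slot %d -> entryhi %#lx\n", idx, SKETCH_UNIQUE_ENTRYHI(idx));
            return 0;
    }
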
 +
 +void
 +kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 +                      struct kvm_vcpu *vcpu)
 +{
 +      unsigned long asid = asid_cache(cpu);
 +
-       if (!((asid += ASID_INC) & ASID_MASK)) {
++      if (!(ASID_MASK(ASID_INC(asid)))) {
 +              if (cpu_has_vtag_icache) {
 +                      flush_icache_all();
 +              }
 +
 +              kvm_local_flush_tlb_all();      /* start new asid cycle */
 +
 +              if (!asid)      /* fix version if needed */
 +                      asid = ASID_FIRST_VERSION;
 +      }
 +
 +      cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 +}
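
This mirrors the generic MIPS ASID allocator: the hardware ASID lives in the low bits of a per-CPU counter and a generation number in the high bits, and when the increment wraps the low field to zero every ASID handed out so far is stale, so the TLB is flushed and the generation advances. A small user-space model of that counter, assuming an 8-bit ASID field:

    #include <stdio.h>

    #define SK_ASID_INC             1UL
    #define SK_ASID_FIELD_MASK      0xffUL          /* low bits = hardware ASID */
    #define SK_ASID_FIRST_VERSION   0x100UL         /* high bits = generation   */

    static unsigned long sk_asid_cache = SK_ASID_FIRST_VERSION;

    static unsigned long sk_new_context(void)
    {
            unsigned long asid = sk_asid_cache + SK_ASID_INC;

            if (!(asid & SK_ASID_FIELD_MASK)) {
                    /* hardware ASIDs exhausted: the real code flushes the TLB here */
                    if (!asid)                      /* the counter itself overflowed */
                            asid = SK_ASID_FIRST_VERSION;
            }
            return sk_asid_cache = asid;
    }

    int main(void)
    {
            sk_asid_cache = SK_ASID_FIRST_VERSION + 0xfe;   /* two steps from wrapping */
            for (int i = 0; i < 3; i++) {
                    unsigned long a = sk_new_context();
                    printf("context %#lx  (hw asid %#lx)\n", a, a & SK_ASID_FIELD_MASK);
            }
            return 0;
    }
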
 +
 +void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
 +{
 +      unsigned long flags;
 +      unsigned long old_entryhi;
 +      unsigned long old_pagemask;
 +      int entry = 0;
 +      int cpu = smp_processor_id();
 +
 +      local_irq_save(flags);
 +
 +      old_entryhi = read_c0_entryhi();
 +      old_pagemask = read_c0_pagemask();
 +
 +      for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
 +              write_c0_index(entry);
 +              mtc0_tlbw_hazard();
 +              tlb_read();
 +              tlbw_use_hazard();
 +
 +              vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
 +              vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
 +              vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
 +              vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
 +      }
 +
 +      write_c0_entryhi(old_entryhi);
 +      write_c0_pagemask(old_pagemask);
 +      mtc0_tlbw_hazard();
 +
 +      local_irq_restore(flags);
 +
 +}
 +
 +void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
 +{
 +      unsigned long flags;
 +      unsigned long old_ctx;
 +      int entry;
 +      int cpu = smp_processor_id();
 +
 +      local_irq_save(flags);
 +
 +      old_ctx = read_c0_entryhi();
 +
 +      for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
 +              write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
 +              mtc0_tlbw_hazard();
 +              write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
 +              write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
 +
 +              write_c0_index(entry);
 +              mtc0_tlbw_hazard();
 +
 +              tlb_write_indexed();
 +              tlbw_use_hazard();
 +      }
 +
 +      tlbw_use_hazard();
 +      write_c0_entryhi(old_ctx);
 +      mtc0_tlbw_hazard();
 +      local_irq_restore(flags);
 +}
 +
 +
 +void kvm_local_flush_tlb_all(void)
 +{
 +      unsigned long flags;
 +      unsigned long old_ctx;
 +      int entry = 0;
 +
 +      local_irq_save(flags);
 +      /* Save old context and create impossible VPN2 value */
 +      old_ctx = read_c0_entryhi();
 +      write_c0_entrylo0(0);
 +      write_c0_entrylo1(0);
 +
 +      /* Blast 'em all away. */
 +      while (entry < current_cpu_data.tlbsize) {
 +              /* Make sure all entries differ. */
 +              write_c0_entryhi(UNIQUE_ENTRYHI(entry));
 +              write_c0_index(entry);
 +              mtc0_tlbw_hazard();
 +              tlb_write_indexed();
 +              entry++;
 +      }
 +      tlbw_use_hazard();
 +      write_c0_entryhi(old_ctx);
 +      mtc0_tlbw_hazard();
 +
 +      local_irq_restore(flags);
 +}
 +
 +void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
 +{
 +      int cpu, entry;
 +
 +      for_each_possible_cpu(cpu) {
 +              for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
 +                      vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
 +                          UNIQUE_ENTRYHI(entry);
 +                      vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
 +                      vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
 +                      vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
 +                          read_c0_pagemask();
 +#ifdef DEBUG
 +                      kvm_debug
 +                          ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
 +                           cpu, entry,
 +                           vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
 +                           vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
 +                           vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
 +#endif
 +              }
 +      }
 +}
 +
 +/* Restore ASID once we are scheduled back after preemption */
 +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 +{
 +      unsigned long flags;
 +      int newasid = 0;
 +
 +#ifdef DEBUG
 +      kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
 +#endif
 +
 +      /* Allocate new kernel and user ASIDs if needed */
 +
 +      local_irq_save(flags);
 +
 +      if (((vcpu->arch.
 +            guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
 +              kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
 +              vcpu->arch.guest_kernel_asid[cpu] =
 +                  vcpu->arch.guest_kernel_mm.context.asid[cpu];
 +              kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
 +              vcpu->arch.guest_user_asid[cpu] =
 +                  vcpu->arch.guest_user_mm.context.asid[cpu];
 +              newasid++;
 +
 +              kvm_info("[%d]: cpu_context: %#lx\n", cpu,
 +                       cpu_context(cpu, current->mm));
 +              kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
 +                       cpu, vcpu->arch.guest_kernel_asid[cpu]);
 +              kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
 +                       vcpu->arch.guest_user_asid[cpu]);
 +      }
 +
 +      if (vcpu->arch.last_sched_cpu != cpu) {
 +              kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
 +                       vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
 +      }
 +
 +      /* Only reload shadow host TLB if new ASIDs haven't been allocated */
 +#if 0
 +      if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
 +              kvm_mips_flush_host_tlb(0);
 +              kvm_shadow_tlb_load(vcpu);
 +      }
 +#endif
 +
 +      if (!newasid) {
 +              /* If we preempted while the guest was executing, then reload the pre-empted ASID */
 +              if (current->flags & PF_VCPU) {
-                       write_c0_entryhi(vcpu->arch.
-                                        preempt_entryhi & ASID_MASK);
++                      write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
 +                      ehb();
 +              }
 +      } else {
 +              /* New ASIDs were allocated for the VM */
 +
 +              /* Were we in guest context? If so then the pre-empted ASID is no longer
 +               * valid, we need to set it to what it should be based on the mode of
 +               * the Guest (Kernel/User)
 +               */
 +              if (current->flags & PF_VCPU) {
 +                      if (KVM_GUEST_KERNEL_MODE(vcpu))
-                               write_c0_entryhi(vcpu->arch.
-                                                guest_kernel_asid[cpu] &
-                                                ASID_MASK);
++                              write_c0_entryhi(ASID_MASK(vcpu->arch.
++                                               guest_kernel_asid[cpu]));
 +                      else
-                               write_c0_entryhi(vcpu->arch.
-                                                guest_user_asid[cpu] &
-                                                ASID_MASK);
++                              write_c0_entryhi(ASID_MASK(vcpu->arch.
++                                               guest_user_asid[cpu]));
 +                      ehb();
 +              }
 +      }
 +
 +      local_irq_restore(flags);
 +
 +}
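
The reload decision above hinges on a single XOR: when the generation (version) bits of the vcpu's cached ASID differ from the CPU's current asid_cache, the cached value predates a TLB flush and fresh kernel and user ASIDs must be allocated. An illustrative check, reusing the same assumed field split as the allocator sketch after kvm_get_new_mmu_context():

    #include <stdbool.h>
    #include <stdio.h>

    #define SK_ASID_FIELD_MASK      0xffUL
    #define SK_ASID_VERSION_MASK    (~SK_ASID_FIELD_MASK)

    static bool sk_asid_is_stale(unsigned long cached, unsigned long asid_cache)
    {
            /* differing high (version) bits => allocated before the last roll-over */
            return ((cached ^ asid_cache) & SK_ASID_VERSION_MASK) != 0;
    }

    int main(void)
    {
            printf("%d\n", sk_asid_is_stale(0x1fe, 0x1ff));  /* same generation: 0 */
            printf("%d\n", sk_asid_is_stale(0x1fe, 0x205));  /* new generation:  1 */
            return 0;
    }
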
 +
 +/* ASID can change if another task is scheduled during preemption */
 +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 +{
 +      unsigned long flags;
 +      uint32_t cpu;
 +
 +      local_irq_save(flags);
 +
 +      cpu = smp_processor_id();
 +
 +
 +      vcpu->arch.preempt_entryhi = read_c0_entryhi();
 +      vcpu->arch.last_sched_cpu = cpu;
 +
 +#if 0
 +      if ((atomic_read(&kvm_mips_instance) > 1)) {
 +              kvm_shadow_tlb_put(vcpu);
 +      }
 +#endif
 +
 +      if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
 +           ASID_VERSION_MASK)) {
 +              kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
 +                        cpu_context(cpu, current->mm));
 +              drop_mmu_context(current->mm, cpu);
 +      }
 +      write_c0_entryhi(cpu_asid(cpu, current->mm));
 +      ehb();
 +
 +      local_irq_restore(flags);
 +}
 +
 +uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 +{
 +      struct mips_coproc *cop0 = vcpu->arch.cop0;
 +      unsigned long paddr, flags;
 +      uint32_t inst;
 +      int index;
 +
 +      if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
 +          KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
 +              local_irq_save(flags);
 +              index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
 +              if (index >= 0) {
 +                      inst = *(opc);
 +              } else {
 +                      index =
 +                          kvm_mips_guest_tlb_lookup(vcpu,
 +                                                    ((unsigned long) opc & VPN2_MASK)
 +                                                    |
-                                                     (kvm_read_c0_guest_entryhi
-                                                      (cop0) & ASID_MASK));
++                                                    ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
 +                      if (index < 0) {
 +                              kvm_err
 +                                  ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
 +                                   __func__, opc, vcpu, read_c0_entryhi());
 +                              kvm_mips_dump_host_tlbs();
 +                              local_irq_restore(flags);
 +                              return KVM_INVALID_INST;
 +                      }
 +                      kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
 +                                                           &vcpu->arch.
 +                                                           guest_tlb[index],
 +                                                           NULL, NULL);
 +                      inst = *(opc);
 +              }
 +              local_irq_restore(flags);
 +      } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
 +              paddr =
 +                  kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
 +                                                       (unsigned long) opc);
 +              inst = *(uint32_t *) CKSEG0ADDR(paddr);
 +      } else {
 +              kvm_err("%s: illegal address: %p\n", __func__, opc);
 +              return KVM_INVALID_INST;
 +      }
 +
 +      return inst;
 +}
 +
 +EXPORT_SYMBOL(kvm_local_flush_tlb_all);
 +EXPORT_SYMBOL(kvm_shadow_tlb_put);
 +EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
 +EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
 +EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
 +EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
 +EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
 +EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
 +EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
 +EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
 +EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
 +EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
 +EXPORT_SYMBOL(kvm_shadow_tlb_load);
 +EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
 +EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
 +EXPORT_SYMBOL(kvm_get_inst);
 +EXPORT_SYMBOL(kvm_arch_vcpu_load);
 +EXPORT_SYMBOL(kvm_arch_vcpu_put);
Simple merge
Simple merge
index 3b3822afb059f7393663f0bb7aca2b0eb60629b2,0a68f2a4e69019244a658d8e51c68edbbe894980..4d46d37875765a3d3024bdee0d043fae9b7a23f7
@@@ -2162,11 -2256,9 +2256,12 @@@ void __cpuinit build_tlb_refill_handler
        case CPU_TX3922:
        case CPU_TX3927:
  #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 -              build_r3000_tlb_refill_handler();
+               setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
 +              if (cpu_has_local_ebase)
 +                      build_r3000_tlb_refill_handler();
                if (!run_once) {
 +                      if (!cpu_has_local_ebase)
 +                              build_r3000_tlb_refill_handler();
                        build_r3000_tlb_load_handler();
                        build_r3000_tlb_store_handler();
                        build_r3000_tlb_modify_handler();
index bc6ac00c0d5712eda01313ebfd399362028f2971,79e5169eabd39056864612cb28628ee96d169267..0ad305f75802bc64250ad01be6e76b1826ea95a2
@@@ -74,23 -71,10 +71,25 @@@ static void __init estimate_frequencies
  {
        unsigned long flags;
        unsigned int count, start;
+ #ifdef CONFIG_IRQ_GIC
        unsigned int giccount = 0, gicstart = 0;
+ #endif
  
 +#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
 +      unsigned int prid = read_c0_prid() & 0xffff00;
 +
 +      /*
 +       * XXXKYMA: hardwire the CPU frequency to Host Freq/4
 +       */
 +      count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3;
 +      if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
 +          (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
 +              count *= 2;
 +
 +      mips_hpt_frequency = count;
 +      return;
 +#endif
 +
        local_irq_save(flags);
  
        /* Start counter exactly on falling edge of update flag. */